// aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjscalarmul_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for P-384
// Input scalar[6], point[18]; output res[18]
//
// extern void p384_montjscalarmul_alt
// (uint64_t res[static 18],
// const uint64_t scalar[static 6],
// const uint64_t point[static 18]);
//
// This function is a variant of its affine point version p384_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^384 * x) mod p_384 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-384, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of the group order n_384) then the output is
// guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point
// Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point
// ----------------------------------------------------------------------------
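// For reference, the two moduli whose 64-bit words appear literally in the
// code below are
//
//     p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1
//     n_384 = 0xffffffffffffffffffffffffffffffffffffffffffffffff
//               c7634d81f4372ddf581a0db248b0a77aecec196accc52973
//
// where n_384 is the group order of the P-384 curve.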
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjscalarmul_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjscalarmul_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjscalarmul_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
#define JACSIZE (3*NUMSIZE)
// Intermediate variables on the stack.
// The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE.
// Uppercase syntactic variants make the x86_att version simpler to generate.
#define SCALARB (0*NUMSIZE)
#define scalarb (0*NUMSIZE)(%rsp)
#define ACC (1*NUMSIZE)
#define acc (1*NUMSIZE)(%rsp)
#define TABENT (4*NUMSIZE)
#define tabent (4*NUMSIZE)(%rsp)
#define TAB (7*NUMSIZE)
#define tab (7*NUMSIZE)(%rsp)
#define res (55*NUMSIZE)(%rsp)
#define NSPACE 56*NUMSIZE
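// Layout check: scalarb at byte 0, acc at 48, tabent at 192, tab at 336
// holding 16 * JACSIZE = 2304 bytes, res pointer at 2640, NSPACE = 2688.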
// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.
#define selectblock_xz(I) \
cmpq $I, %rdi ; \
cmovzq TAB+JACSIZE*(I-1)(%rsp), %rax ; \
cmovzq TAB+JACSIZE*(I-1)+8(%rsp), %rbx ; \
cmovzq TAB+JACSIZE*(I-1)+16(%rsp), %rcx ; \
cmovzq TAB+JACSIZE*(I-1)+24(%rsp), %rdx ; \
cmovzq TAB+JACSIZE*(I-1)+32(%rsp), %r8 ; \
cmovzq TAB+JACSIZE*(I-1)+40(%rsp), %r9 ; \
cmovzq TAB+JACSIZE*(I-1)+96(%rsp), %r10 ; \
cmovzq TAB+JACSIZE*(I-1)+104(%rsp), %r11 ; \
cmovzq TAB+JACSIZE*(I-1)+112(%rsp), %r12 ; \
cmovzq TAB+JACSIZE*(I-1)+120(%rsp), %r13 ; \
cmovzq TAB+JACSIZE*(I-1)+128(%rsp), %r14 ; \
cmovzq TAB+JACSIZE*(I-1)+136(%rsp), %r15
#define selectblock_y(I) \
cmpq $I, %rdi ; \
cmovzq TAB+JACSIZE*(I-1)+48(%rsp), %rax ; \
cmovzq TAB+JACSIZE*(I-1)+56(%rsp), %rbx ; \
cmovzq TAB+JACSIZE*(I-1)+64(%rsp), %rcx ; \
cmovzq TAB+JACSIZE*(I-1)+72(%rsp), %rdx ; \
cmovzq TAB+JACSIZE*(I-1)+80(%rsp), %r8 ; \
cmovzq TAB+JACSIZE*(I-1)+88(%rsp), %r9
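// Sweeping these blocks over indices 1..16 reads every table entry and
// retains only the one matching the digit in %rdi, so the lookup is
// constant-time with no secret-dependent addressing.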
S2N_BN_SYMBOL(p384_montjscalarmul_alt):
CFI_START
_CET_ENDBR
// The Windows version literally calls the standard ABI version.
// This simplifies the proofs since subroutine offsets are fixed.
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
CFI_CALL(Lp384_montjscalarmul_alt_standard)
CFI_POP(%rsi)
CFI_POP(%rdi)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p384_montjscalarmul_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_alt_standard)
Lp384_montjscalarmul_alt_standard:
CFI_START
#endif
// Real start of the standard ABI code.
CFI_PUSH(%r15)
CFI_PUSH(%r14)
CFI_PUSH(%r13)
CFI_PUSH(%r12)
CFI_PUSH(%rbp)
CFI_PUSH(%rbx)
CFI_DEC_RSP(NSPACE)
// Preserve the "res" input argument; others get processed early.
movq %rdi, res
// Reduce the input scalar mod n_384, i.e. conditionally subtract n_384.
// Store it to "scalarb".
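// Since n_384 > 2^383, any 6-word input is below 2 * n_384, so a single
// conditional subtraction fully reduces: subtract n_384 with a borrow
// chain and, if it borrows out of the top (CF set), keep the original.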
movq (%rsi), %r8
movq $0xecec196accc52973, %rax
subq %rax, %r8
movq 8(%rsi), %r9
movq $0x581a0db248b0a77a, %rax
sbbq %rax, %r9
movq 16(%rsi), %r10
movq $0xc7634d81f4372ddf, %rax
sbbq %rax, %r10
movq 24(%rsi), %r11
movq $0xffffffffffffffff, %rax
sbbq %rax, %r11
movq 32(%rsi), %r12
sbbq %rax, %r12
movq 40(%rsi), %r13
sbbq %rax, %r13
cmovcq (%rsi), %r8
cmovcq 8(%rsi), %r9
cmovcq 16(%rsi), %r10
cmovcq 24(%rsi), %r11
cmovcq 32(%rsi), %r12
cmovcq 40(%rsi), %r13
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
movq %r12, SCALARB+32(%rsp)
movq %r13, SCALARB+40(%rsp)
// Set the tab[0] table entry to the input point = 1 * P
movq (%rdx), %rax
movq %rax, TAB(%rsp)
movq 8(%rdx), %rax
movq %rax, TAB+8(%rsp)
movq 16(%rdx), %rax
movq %rax, TAB+16(%rsp)
movq 24(%rdx), %rax
movq %rax, TAB+24(%rsp)
movq 32(%rdx), %rax
movq %rax, TAB+32(%rsp)
movq 40(%rdx), %rax
movq %rax, TAB+40(%rsp)
movq 48(%rdx), %rax
movq %rax, TAB+48(%rsp)
movq 56(%rdx), %rax
movq %rax, TAB+56(%rsp)
movq 64(%rdx), %rax
movq %rax, TAB+64(%rsp)
movq 72(%rdx), %rax
movq %rax, TAB+72(%rsp)
movq 80(%rdx), %rax
movq %rax, TAB+80(%rsp)
movq 88(%rdx), %rax
movq %rax, TAB+88(%rsp)
movq 96(%rdx), %rax
movq %rax, TAB+96(%rsp)
movq 104(%rdx), %rax
movq %rax, TAB+104(%rsp)
movq 112(%rdx), %rax
movq %rax, TAB+112(%rsp)
movq 120(%rdx), %rax
movq %rax, TAB+120(%rsp)
movq 128(%rdx), %rax
movq %rax, TAB+128(%rsp)
movq 136(%rdx), %rax
movq %rax, TAB+136(%rsp)
// Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P
leaq TAB+JACSIZE*1(%rsp), %rdi
leaq TAB(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq TAB+JACSIZE*2(%rsp), %rdi
leaq TAB+JACSIZE*1(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
leaq TAB+JACSIZE*3(%rsp), %rdi
leaq TAB+JACSIZE*1(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq TAB+JACSIZE*4(%rsp), %rdi
leaq TAB+JACSIZE*3(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
leaq TAB+JACSIZE*5(%rsp), %rdi
leaq TAB+JACSIZE*2(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq TAB+JACSIZE*6(%rsp), %rdi
leaq TAB+JACSIZE*5(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
leaq TAB+JACSIZE*7(%rsp), %rdi
leaq TAB+JACSIZE*3(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq TAB+JACSIZE*8(%rsp), %rdi
leaq TAB+JACSIZE*7(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
leaq TAB+JACSIZE*9(%rsp), %rdi
leaq TAB+JACSIZE*4(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq TAB+JACSIZE*10(%rsp), %rdi
leaq TAB+JACSIZE*9(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
leaq TAB+JACSIZE*11(%rsp), %rdi
leaq TAB+JACSIZE*5(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq TAB+JACSIZE*12(%rsp), %rdi
leaq TAB+JACSIZE*11(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
leaq TAB+JACSIZE*13(%rsp), %rdi
leaq TAB+JACSIZE*6(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq TAB+JACSIZE*14(%rsp), %rdi
leaq TAB+JACSIZE*13(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
leaq TAB+JACSIZE*15(%rsp), %rdi
leaq TAB+JACSIZE*7(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
// Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed
// digits. The digits of the constant, in lowest-to-highest order, are as
// follows; they are generated dynamically to use fewer large constant loads.
//
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x4210842108421084
// 0x8421084210842108
// 0x0842108421084210
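// Writing C for this constant, each 5-bit window w of scalar + C (below
// the un-recoded top field) yields a signed digit w - 16 in [-16,15];
// negative digits are realized later by negating the y coordinate of the
// selected table entry.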
movq $0x1084210842108421, %rax
movq %rax, %rcx
shrq $1, %rax
movq SCALARB(%rsp), %r8
addq %rax, %r8
movq SCALARB+8(%rsp), %r9
adcq %rcx, %r9
leaq (%rcx,%rcx), %rcx
movq SCALARB+16(%rsp), %r10
adcq %rcx, %r10
leaq (%rcx,%rcx), %rcx
movq SCALARB+24(%rsp), %r11
adcq %rcx, %r11
leaq (%rcx,%rcx), %rcx
movq SCALARB+32(%rsp), %r12
adcq %rcx, %r12
movq SCALARB+40(%rsp), %r13
adcq %rax, %r13
sbbq %rdi, %rdi
negq %rdi
// Record the top bitfield in %rdi then shift the whole scalar left 4 bits
// to align the top of the next bitfield with the MSB (bits 379..383).
shldq $4, %r13, %rdi
shldq $4, %r12, %r13
shldq $4, %r11, %r12
shldq $4, %r10, %r11
shldq $4, %r9, %r10
shldq $4, %r8, %r9
shlq $4, %r8
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
movq %r12, SCALARB+32(%rsp)
movq %r13, SCALARB+40(%rsp)
// Initialize the accumulator to the corresponding entry using constant-time
// lookup in the table. This top digit, uniquely, is not recoded so there is
// no sign adjustment to make. On the x86 integer side we don't have enough
// registers to hold all the fields; this could be better done with SIMD
// registers anyway. So we do x and z coordinates in one sweep, y in another
// (this is a rehearsal for below where we might need to negate the y).
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock_xz(1)
selectblock_xz(2)
selectblock_xz(3)
selectblock_xz(4)
selectblock_xz(5)
selectblock_xz(6)
selectblock_xz(7)
selectblock_xz(8)
selectblock_xz(9)
selectblock_xz(10)
selectblock_xz(11)
selectblock_xz(12)
selectblock_xz(13)
selectblock_xz(14)
selectblock_xz(15)
selectblock_xz(16)
movq %rax, ACC(%rsp)
movq %rbx, ACC+8(%rsp)
movq %rcx, ACC+16(%rsp)
movq %rdx, ACC+24(%rsp)
movq %r8, ACC+32(%rsp)
movq %r9, ACC+40(%rsp)
movq %r10, ACC+96(%rsp)
movq %r11, ACC+104(%rsp)
movq %r12, ACC+112(%rsp)
movq %r13, ACC+120(%rsp)
movq %r14, ACC+128(%rsp)
movq %r15, ACC+136(%rsp)
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
selectblock_y(1)
selectblock_y(2)
selectblock_y(3)
selectblock_y(4)
selectblock_y(5)
selectblock_y(6)
selectblock_y(7)
selectblock_y(8)
selectblock_y(9)
selectblock_y(10)
selectblock_y(11)
selectblock_y(12)
selectblock_y(13)
selectblock_y(14)
selectblock_y(15)
selectblock_y(16)
movq %rax, ACC+48(%rsp)
movq %rbx, ACC+56(%rsp)
movq %rcx, ACC+64(%rsp)
movq %rdx, ACC+72(%rsp)
movq %r8, ACC+80(%rsp)
movq %r9, ACC+88(%rsp)
// Main loop over size-5 bitfields: double 5 times, then add the signed digit.
// At each stage we shift the scalar left by 5 bits so we can simply pick
// the top 5 bits as the bitfield, saving some fiddle over indexing.
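// In pseudocode the loop body is:
//
//     acc := 32 * acc                           (5 Jacobian doublings)
//     w := top 5 bits of scalar; scalar <<= 5   (next recoded window)
//     acc := acc +/- |w - 16| * P               (signed table lookup)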
movl $380, %ebp
Lp384_montjscalarmul_alt_mainloop:
subq $5, %rbp
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjdouble)
// Choose the bitfield and adjust it to sign and magnitude
movq SCALARB(%rsp), %r8
movq SCALARB+8(%rsp), %r9
movq SCALARB+16(%rsp), %r10
movq SCALARB+24(%rsp), %r11
movq SCALARB+32(%rsp), %r12
movq SCALARB+40(%rsp), %r13
movq %r13, %rdi
shrq $59, %rdi
shldq $5, %r12, %r13
shldq $5, %r11, %r12
shldq $5, %r10, %r11
shldq $5, %r9, %r10
shldq $5, %r8, %r9
shlq $5, %r8
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
movq %r12, SCALARB+32(%rsp)
movq %r13, SCALARB+40(%rsp)
subq $16, %rdi
sbbq %rsi, %rsi // %rsi = sign of digit (-1 = negative)
xorq %rsi, %rdi
subq %rsi, %rdi // %rdi = absolute value of digit
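// %rdi now lies in [0,16]; a zero digit matches none of the indices below,
// leaving the zero-initialized selection (z = 0), i.e. the point at
// infinity, which the subsequent addition treats as the group identity.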
// Conditionally select the table entry tab[i-1] = i * P in constant time
// Again, this is done in two sweeps, first doing x and z then y.
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock_xz(1)
selectblock_xz(2)
selectblock_xz(3)
selectblock_xz(4)
selectblock_xz(5)
selectblock_xz(6)
selectblock_xz(7)
selectblock_xz(8)
selectblock_xz(9)
selectblock_xz(10)
selectblock_xz(11)
selectblock_xz(12)
selectblock_xz(13)
selectblock_xz(14)
selectblock_xz(15)
selectblock_xz(16)
movq %rax, TABENT(%rsp)
movq %rbx, TABENT+8(%rsp)
movq %rcx, TABENT+16(%rsp)
movq %rdx, TABENT+24(%rsp)
movq %r8, TABENT+32(%rsp)
movq %r9, TABENT+40(%rsp)
movq %r10, TABENT+96(%rsp)
movq %r11, TABENT+104(%rsp)
movq %r12, TABENT+112(%rsp)
movq %r13, TABENT+120(%rsp)
movq %r14, TABENT+128(%rsp)
movq %r15, TABENT+136(%rsp)
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
selectblock_y(1)
selectblock_y(2)
selectblock_y(3)
selectblock_y(4)
selectblock_y(5)
selectblock_y(6)
selectblock_y(7)
selectblock_y(8)
selectblock_y(9)
selectblock_y(10)
selectblock_y(11)
selectblock_y(12)
selectblock_y(13)
selectblock_y(14)
selectblock_y(15)
selectblock_y(16)
// Store it to "tabent" with the y coordinate optionally negated.
// This is done carefully to give coordinates < p_384 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
// The digits of the prime p_384 are generated dynamically from
// the zeroth via not/lea to reduce the number of constant loads.
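// The negation itself uses -(x',y',z') = (x', p_384 - y', z') on
// Jacobian triples.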
movq %rax, %r10
orq %rbx, %r10
movq %rcx, %r11
orq %rdx, %r11
movq %r8, %r12
orq %r9, %r12
orq %r11, %r10
orq %r12, %r10
cmovzq %r10, %rsi
movl $0xffffffff, %r10d
movq %r10, %r11
notq %r11
leaq (%r10,%r11), %r13
subq %rax, %r10
leaq -1(%r13), %r12
sbbq %rbx, %r11
movq %r13, %r14
sbbq %rcx, %r12
sbbq %rdx, %r13
movq %r14, %r15
sbbq %r8, %r14
sbbq %r9, %r15
testq %rsi, %rsi
cmovnzq %r10, %rax
cmovnzq %r11, %rbx
cmovnzq %r12, %rcx
cmovnzq %r13, %rdx
cmovnzq %r14, %r8
cmovnzq %r15, %r9
movq %rax, TABENT+48(%rsp)
movq %rbx, TABENT+56(%rsp)
movq %rcx, TABENT+64(%rsp)
movq %rdx, TABENT+72(%rsp)
movq %r8, TABENT+80(%rsp)
movq %r9, TABENT+88(%rsp)
// Add to the accumulator
leaq TABENT(%rsp), %rdx
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_alt_p384_montjadd)
testq %rbp, %rbp
jne Lp384_montjscalarmul_alt_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
movq res, %rdi
movq ACC(%rsp), %rax
movq %rax, (%rdi)
movq ACC+8(%rsp), %rax
movq %rax, 8(%rdi)
movq ACC+16(%rsp), %rax
movq %rax, 16(%rdi)
movq ACC+24(%rsp), %rax
movq %rax, 24(%rdi)
movq ACC+32(%rsp), %rax
movq %rax, 32(%rdi)
movq ACC+40(%rsp), %rax
movq %rax, 40(%rdi)
movq ACC+48(%rsp), %rax
movq %rax, 48(%rdi)
movq ACC+56(%rsp), %rax
movq %rax, 56(%rdi)
movq ACC+64(%rsp), %rax
movq %rax, 64(%rdi)
movq ACC+72(%rsp), %rax
movq %rax, 72(%rdi)
movq ACC+80(%rsp), %rax
movq %rax, 80(%rdi)
movq ACC+88(%rsp), %rax
movq %rax, 88(%rdi)
movq ACC+96(%rsp), %rax
movq %rax, 96(%rdi)
movq ACC+104(%rsp), %rax
movq %rax, 104(%rdi)
movq ACC+112(%rsp), %rax
movq %rax, 112(%rdi)
movq ACC+120(%rsp), %rax
movq %rax, 120(%rdi)
movq ACC+128(%rsp), %rax
movq %rax, 128(%rdi)
movq ACC+136(%rsp), %rax
movq %rax, 136(%rdi)
// Restore stack and registers and return
CFI_INC_RSP(NSPACE)
CFI_POP(%rbx)
CFI_POP(%rbp)
CFI_POP(%r12)
CFI_POP(%r13)
CFI_POP(%r14)
CFI_POP(%r15)
CFI_RET
#if WINDOWS_ABI
S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_alt_standard)
#else
S2N_BN_SIZE_DIRECTIVE(p384_montjscalarmul_alt)
#endif
// Local copies of subroutines, complete clones at the moment
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjadd)
Lp384_montjscalarmul_alt_p384_montjadd:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(352)
movq %rsi, 0x150(%rsp)
movq %rdx, 0x158(%rsp)
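// The point addition is fully inlined. Its first eight field operations,
// using the variable names of the generic p384_montjadd (each input point
// stores x at offset 0, y at 0x30, z at 0x60), are:
//
//     z1sq = z_1^2      -> (%rsp)        z2sq = z_2^2      -> 0xf0(%rsp)
//     y1a  = y_1 * z_2  -> 0x120(%rsp)   y2a  = y_2 * z_1  -> 0x30(%rsp)
//     x2a  = x_2 * z1sq -> 0x60(%rsp)    x1a  = x_1 * z2sq -> 0xc0(%rsp)
//     y2a  = y2a * z1sq -> 0x30(%rsp)    y1a  = y1a * z2sq -> 0x120(%rsp)
//
// each a 6x6-word Montgomery multiplication or squaring mod p_384.
// First block: montsqr computing z1sq = z_1^2.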
movq 0x60(%rsi), %rbx
movq 0x68(%rsi), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x78(%rsi), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x88(%rsi), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x78(%rsi), %rax
mulq 0x80(%rsi)
movq %rax, %r15
movq %rdx, %rcx
movq 0x70(%rsi), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x68(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0x68(%rsi), %rbx
movq 0x78(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x80(%rsi), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x70(%rsi), %rbx
movq 0x78(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x78(%rsi), %rax
mulq 0x88(%rsi)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x80(%rsi), %rax
mulq 0x88(%rsi)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0x60(%rsi), %rax
mulq %rax
movq %r8, (%rsp)
movq %rax, %r8
movq 0x68(%rsi), %rax
movq %rbp, 0x8(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x70(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x78(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rax
negq %rbp
adcq 0x8(%rsp), %rax
adcq (%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rsi
movq %rbx, (%rsp)
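// Montgomery reduction step: the quotient for the lowest word w is
// q = w * (2^32 + 1) mod 2^64, since 2^32 + 1 = -p_384^-1 mod 2^64,
// which makes w + q * p_384 divisible by 2^64.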
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq (%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rsi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rsi
movq %r14, (%rsp)
movq %r15, 0x8(%rsp)
movq %rcx, 0x10(%rsp)
movq %rbx, 0x18(%rsp)
movq %rbp, 0x20(%rsp)
movq %rsi, 0x28(%rsp)
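// montsqr: z2sq = z_2^2 -> 0xf0(%rsp)..0x118(%rsp)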
movq 0x158(%rsp), %rsi
movq 0x60(%rsi), %rbx
movq 0x68(%rsi), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x78(%rsi), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x88(%rsi), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x78(%rsi), %rax
mulq 0x80(%rsi)
movq %rax, %r15
movq %rdx, %rcx
movq 0x70(%rsi), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x68(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0x68(%rsi), %rbx
movq 0x78(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x80(%rsi), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x70(%rsi), %rbx
movq 0x78(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x78(%rsi), %rax
mulq 0x88(%rsi)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x80(%rsi), %rax
mulq 0x88(%rsi)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0x60(%rsi), %rax
mulq %rax
movq %r8, 0xf0(%rsp)
movq %rax, %r8
movq 0x68(%rsi), %rax
movq %rbp, 0xf8(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x70(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x78(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rax
negq %rbp
adcq 0xf8(%rsp), %rax
adcq 0xf0(%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rsi
movq %rbx, 0xf0(%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq 0xf0(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rsi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rsi
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %rbx, 0x108(%rsp)
movq %rbp, 0x110(%rsp)
movq %rsi, 0x118(%rsp)
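// montmul: y1a = y_1 * z_2 -> 0x120(%rsp)..0x148(%rsp)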
movq 0x150(%rsp), %rsi
movq 0x158(%rsp), %rcx
movq 0x30(%rsi), %rbx
movq 0x60(%rcx), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x68(%rcx), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x70(%rcx), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x78(%rcx), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x80(%rcx), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x88(%rcx), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x38(%rsi), %rbx
movq 0x60(%rcx), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x68(%rcx), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x70(%rcx), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x78(%rcx), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x80(%rcx), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x88(%rcx), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x40(%rsi), %rbx
movq 0x60(%rcx), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x68(%rcx), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x70(%rcx), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x78(%rcx), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x80(%rcx), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x88(%rcx), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x48(%rsi), %rbx
movq 0x60(%rcx), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x68(%rcx), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x70(%rcx), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x78(%rcx), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x80(%rcx), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x88(%rcx), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x50(%rsi), %rbx
movq 0x60(%rcx), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x68(%rcx), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x70(%rcx), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x78(%rcx), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x80(%rcx), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x88(%rcx), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x58(%rsi), %rbx
movq 0x60(%rcx), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x68(%rcx), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x70(%rcx), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x78(%rcx), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x80(%rcx), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x88(%rcx), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x120(%rsp)
movq %r15, 0x128(%rsp)
movq %r8, 0x130(%rsp)
movq %r9, 0x138(%rsp)
movq %r10, 0x140(%rsp)
movq %r11, 0x148(%rsp)
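// montmul: y2a = y_2 * z_1 -> 0x30(%rsp)..0x58(%rsp)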
movq 0x150(%rsp), %rsi
movq 0x158(%rsp), %rcx
movq 0x30(%rcx), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x68(%rsi), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x70(%rsi), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x78(%rsi), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x80(%rsi), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x88(%rsi), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x38(%rcx), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x68(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x70(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x78(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x80(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x88(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x40(%rcx), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x68(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x70(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x78(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x80(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x88(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x48(%rcx), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x68(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x70(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x78(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x80(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x88(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x50(%rcx), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x68(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x70(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x78(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x80(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x88(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x58(%rcx), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x68(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x70(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x78(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x80(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x88(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movq %r9, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movq %r11, 0x58(%rsp)
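// montmul: x2a = x_2 * z1sq -> 0x60(%rsp)..0x88(%rsp)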
movq 0x158(%rsp), %rcx
movq (%rcx), %rbx
movq (%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x8(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x10(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x18(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x20(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x28(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x8(%rcx), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x10(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x18(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x20(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x28(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x10(%rcx), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x10(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x18(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x20(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x28(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x18(%rcx), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x10(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x18(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x20(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x28(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x20(%rcx), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x10(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x18(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x20(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x28(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x28(%rcx), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x10(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x18(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x20(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x28(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x60(%rsp)
movq %r15, 0x68(%rsp)
movq %r8, 0x70(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x80(%rsp)
movq %r11, 0x88(%rsp)
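// montmul: x1a = x_1 * z2sq -> 0xc0(%rsp)..0xe8(%rsp)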
movq 0x150(%rsp), %rsi
movq (%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x100(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x108(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x110(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x118(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x8(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x100(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x108(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x110(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x118(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x10(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x100(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x108(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x110(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x118(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x18(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x100(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x108(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x110(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x118(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x20(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x100(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x108(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x110(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x118(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x28(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x100(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x108(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x110(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x118(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %r8, 0xd0(%rsp)
movq %r9, 0xd8(%rsp)
movq %r10, 0xe0(%rsp)
movq %r11, 0xe8(%rsp)
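// montmul: y2a = y2a * z1sq, back into 0x30(%rsp)..0x58(%rsp)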
movq 0x30(%rsp), %rbx
movq (%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x8(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x10(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x18(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x20(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x28(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x38(%rsp), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x10(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x18(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x20(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x28(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x40(%rsp), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x10(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x18(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x20(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x28(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x48(%rsp), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x10(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x18(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x20(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x28(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x50(%rsp), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x10(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x18(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x20(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x28(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x58(%rsp), %rbx
movq (%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x10(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x18(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x20(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x28(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movq %r9, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movq %r11, 0x58(%rsp)
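// montmul: y1a = y1a * z2sq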
movq 0x120(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x100(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x108(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x110(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x118(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x128(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x100(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x108(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x110(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x118(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x130(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x100(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x108(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x110(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x118(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x138(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x100(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x108(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x110(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x118(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x140(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x100(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x108(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x110(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x118(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x148(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x100(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x108(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x110(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x118(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x120(%rsp)
movq %r15, 0x128(%rsp)
movq %r8, 0x130(%rsp)
movq %r9, 0x138(%rsp)
movq %r10, 0x140(%rsp)
movq %r11, 0x148(%rsp)
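
// Modular subtraction:
// [0xf0..0x118] := ([0x60..0x88] - [0xc0..0xe8]) mod p_384; on borrow,
// p_384 is added back via the masked-word trick below.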
movq 0x60(%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x68(%rsp), %rdx
sbbq 0xc8(%rsp), %rdx
movq 0x70(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x78(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movq 0x80(%rsp), %r10
sbbq 0xe0(%rsp), %r10
movq 0x88(%rsp), %r11
sbbq 0xe8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0xf0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xf8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x100(%rsp)
sbbq $0x0, %r9
movq %r9, 0x108(%rsp)
sbbq $0x0, %r10
movq %r10, 0x110(%rsp)
sbbq $0x0, %r11
movq %r11, 0x118(%rsp)
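
// Modular subtraction:
// [0x30..0x58] := ([0x30..0x58] - [0x120..0x148]) mod p_384.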
movq 0x30(%rsp), %rax
subq 0x120(%rsp), %rax
movq 0x38(%rsp), %rdx
sbbq 0x128(%rsp), %rdx
movq 0x40(%rsp), %r8
sbbq 0x130(%rsp), %r8
movq 0x48(%rsp), %r9
sbbq 0x138(%rsp), %r9
movq 0x50(%rsp), %r10
sbbq 0x140(%rsp), %r10
movq 0x58(%rsp), %r11
sbbq 0x148(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0x30(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x38(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x40(%rsp)
sbbq $0x0, %r9
movq %r9, 0x48(%rsp)
sbbq $0x0, %r10
movq %r10, 0x50(%rsp)
sbbq $0x0, %r11
movq %r11, 0x58(%rsp)
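
// Montgomery squaring mod p_384:
// [0x90..0xb8] := [0xf0..0x118]^2 * 2^-384 mod p_384. The off-diagonal
// products are accumulated first, doubled in one carry chain, and the
// diagonal squares are then added before the six reduction steps.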
movq 0xf0(%rsp), %rbx
movq 0xf8(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x108(%rsp), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x118(%rsp), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x108(%rsp), %rax
mulq 0x110(%rsp)
movq %rax, %r15
movq %rdx, %rcx
movq 0x100(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0xf8(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0xf8(%rsp), %rbx
movq 0x108(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x110(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x118(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x110(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x100(%rsp), %rbx
movq 0x108(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x110(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x118(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x108(%rsp), %rax
mulq 0x118(%rsp)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x110(%rsp), %rax
mulq 0x118(%rsp)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0xf0(%rsp), %rax
mulq %rax
movq %r8, 0x90(%rsp)
movq %rax, %r8
movq 0xf8(%rsp), %rax
movq %rbp, 0x98(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x100(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x108(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x110(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x118(%rsp), %rax
mulq %rax
negq %rbp
adcq 0x98(%rsp), %rax
adcq 0x90(%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rsi
movq %rbx, 0x90(%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq 0x90(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rsi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rsi
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq %rcx, 0xa0(%rsp)
movq %rbx, 0xa8(%rsp)
movq %rbp, 0xb0(%rsp)
movq %rsi, 0xb8(%rsp)
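
// Montgomery squaring mod p_384:
// [(%rsp)..0x28] := [0x30..0x58]^2 * 2^-384 mod p_384.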
movq 0x30(%rsp), %rbx
movq 0x38(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x48(%rsp), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x58(%rsp), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x48(%rsp), %rax
mulq 0x50(%rsp)
movq %rax, %r15
movq %rdx, %rcx
movq 0x40(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x38(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0x38(%rsp), %rbx
movq 0x48(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x50(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x58(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x50(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x40(%rsp), %rbx
movq 0x48(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x50(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x58(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x48(%rsp), %rax
mulq 0x58(%rsp)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x50(%rsp), %rax
mulq 0x58(%rsp)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0x30(%rsp), %rax
mulq %rax
movq %r8, (%rsp)
movq %rax, %r8
movq 0x38(%rsp), %rax
movq %rbp, 0x8(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x40(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x48(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x50(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x58(%rsp), %rax
mulq %rax
negq %rbp
adcq 0x8(%rsp), %rax
adcq (%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rsi
movq %rbx, (%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq (%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rsi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rsi
movq %r14, (%rsp)
movq %r15, 0x8(%rsp)
movq %rcx, 0x10(%rsp)
movq %rbx, 0x18(%rsp)
movq %rbp, 0x20(%rsp)
movq %rsi, 0x28(%rsp)
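
// Montgomery multiplication mod p_384:
// [0xc0..0xe8] := ([0xc0..0xe8] * [0x90..0xb8]) * 2^-384 mod p_384.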
movq 0xc0(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x98(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0xa0(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0xa8(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0xb0(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0xb8(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0xc8(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x98(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0xd0(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x98(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0xd8(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x98(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0xe0(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x98(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0xe8(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x98(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %r8, 0xd0(%rsp)
movq %r9, 0xd8(%rsp)
movq %r10, 0xe0(%rsp)
movq %r11, 0xe8(%rsp)
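
// Montgomery multiplication mod p_384:
// [0x60..0x88] := ([0x60..0x88] * [0x90..0xb8]) * 2^-384 mod p_384.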
movq 0x60(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x98(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0xa0(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0xa8(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0xb0(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0xb8(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x68(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x98(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x70(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x98(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x78(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x98(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x80(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x98(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x88(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x98(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x60(%rsp)
movq %r15, 0x68(%rsp)
movq %r8, 0x70(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x80(%rsp)
movq %r11, 0x88(%rsp)
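
// Modular subtraction:
// [(%rsp)..0x28] := ([(%rsp)..0x28] - [0xc0..0xe8]) mod p_384.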
movq (%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x8(%rsp), %rdx
sbbq 0xc8(%rsp), %rdx
movq 0x10(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movq 0x20(%rsp), %r10
sbbq 0xe0(%rsp), %r10
movq 0x28(%rsp), %r11
sbbq 0xe8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, (%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x10(%rsp)
sbbq $0x0, %r9
movq %r9, 0x18(%rsp)
sbbq $0x0, %r10
movq %r10, 0x20(%rsp)
sbbq $0x0, %r11
movq %r11, 0x28(%rsp)
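
// Modular subtraction:
// [0x90..0xb8] := ([0x60..0x88] - [0xc0..0xe8]) mod p_384.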
movq 0x60(%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x68(%rsp), %rdx
sbbq 0xc8(%rsp), %rdx
movq 0x70(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x78(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movq 0x80(%rsp), %r10
sbbq 0xe0(%rsp), %r10
movq 0x88(%rsp), %r11
sbbq 0xe8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0x90(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x98(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xa0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xa8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xb0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xb8(%rsp)
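
// Montgomery multiplication by the z coordinate (words at offset
// 0x60..0x88) of the input point whose pointer was saved at 0x150(%rsp):
// [0xf0..0x118] := (z * [0xf0..0x118]) * 2^-384 mod p_384.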
movq 0x150(%rsp), %rsi
movq 0x60(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x100(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x108(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x110(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x118(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x68(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x100(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x108(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x110(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x118(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x70(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x100(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x108(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x110(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x118(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x78(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x100(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x108(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x110(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x118(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x80(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x100(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x108(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x110(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x118(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x88(%rsi), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x100(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x108(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x110(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x118(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0x108(%rsp)
movq %r10, 0x110(%rsp)
movq %r11, 0x118(%rsp)
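
// Modular subtraction:
// [(%rsp)..0x28] := ([(%rsp)..0x28] - [0x60..0x88]) mod p_384.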
movq (%rsp), %rax
subq 0x60(%rsp), %rax
movq 0x8(%rsp), %rdx
sbbq 0x68(%rsp), %rdx
movq 0x10(%rsp), %r8
sbbq 0x70(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0x78(%rsp), %r9
movq 0x20(%rsp), %r10
sbbq 0x80(%rsp), %r10
movq 0x28(%rsp), %r11
sbbq 0x88(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, (%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x10(%rsp)
sbbq $0x0, %r9
movq %r9, 0x18(%rsp)
sbbq $0x0, %r10
movq %r10, 0x20(%rsp)
sbbq $0x0, %r11
movq %r11, 0x28(%rsp)
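
// Modular subtraction:
// [0xc0..0xe8] := ([0xc0..0xe8] - [(%rsp)..0x28]) mod p_384.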
movq 0xc0(%rsp), %rax
subq (%rsp), %rax
movq 0xc8(%rsp), %rdx
sbbq 0x8(%rsp), %rdx
movq 0xd0(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0xd8(%rsp), %r9
sbbq 0x18(%rsp), %r9
movq 0xe0(%rsp), %r10
sbbq 0x20(%rsp), %r10
movq 0xe8(%rsp), %r11
sbbq 0x28(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0xc0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xc8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xd0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xd8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xe0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xe8(%rsp)
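
// Montgomery multiplication mod p_384:
// [0x90..0xb8] := ([0x120..0x148] * [0x90..0xb8]) * 2^-384 mod p_384.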
movq 0x120(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x98(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0xa0(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0xa8(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0xb0(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0xb8(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x128(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x98(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x130(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x98(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x138(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x98(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x140(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x98(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x148(%rsp), %rbx
movq 0x90(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x98(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0xa0(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0xa8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0xb0(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0xb8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movq %r9, 0xa8(%rsp)
movq %r10, 0xb0(%rsp)
movq %r11, 0xb8(%rsp)
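
// Montgomery multiplication by the z coordinate of the other input point
// (pointer saved at 0x158(%rsp)):
// [0xf0..0x118] := (z * [0xf0..0x118]) * 2^-384 mod p_384.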
movq 0x158(%rsp), %rcx
movq 0x60(%rcx), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x100(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x108(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x110(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x118(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x68(%rcx), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x100(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x108(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x110(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x118(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x70(%rcx), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x100(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x108(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x110(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x118(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x78(%rcx), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x100(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x108(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x110(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x118(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x80(%rcx), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x100(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x108(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x110(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x118(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x88(%rcx), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x100(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x108(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x110(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x118(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0x108(%rsp)
movq %r10, 0x110(%rsp)
movq %r11, 0x118(%rsp)
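
// Montgomery multiplication mod p_384:
// [0xc0..0xe8] := ([0xc0..0xe8] * [0x30..0x58]) * 2^-384 mod p_384.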
movq 0xc0(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x38(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x40(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x48(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x50(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x58(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0xc8(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x38(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x40(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x48(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x50(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x58(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0xd0(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x38(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x40(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x48(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x50(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x58(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0xd8(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x38(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x40(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x48(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x50(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x58(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0xe0(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x38(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x40(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x48(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x50(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x58(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0xe8(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x38(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x40(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x48(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x50(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x58(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %r8, 0xd0(%rsp)
movq %r9, 0xd8(%rsp)
movq %r10, 0xe0(%rsp)
movq %r11, 0xe8(%rsp)
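
// Modular subtraction:
// [0xc0..0xe8] := ([0xc0..0xe8] - [0x90..0xb8]) mod p_384.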
movq 0xc0(%rsp), %rax
subq 0x90(%rsp), %rax
movq 0xc8(%rsp), %rdx
sbbq 0x98(%rsp), %rdx
movq 0xd0(%rsp), %r8
sbbq 0xa0(%rsp), %r8
movq 0xd8(%rsp), %r9
sbbq 0xa8(%rsp), %r9
movq 0xe0(%rsp), %r10
sbbq 0xb0(%rsp), %r10
movq 0xe8(%rsp), %r11
sbbq 0xb8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0xc0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xc8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xd0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xd8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xe0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xe8(%rsp)
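
// Degenerate-case handling for the z coordinate: build all-ones/zero masks
// recording whether each input point's z (words at offset 0x60..0x88) is
// nonzero. If exactly one input has z = 0 (the point at infinity), its
// partner's z is taken verbatim; when the masks agree, the freshly computed
// z in [0xf0..0x118] is kept.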
movq 0x158(%rsp), %rcx
movq 0x60(%rcx), %r8
movq 0x68(%rcx), %r9
movq 0x70(%rcx), %r10
movq 0x78(%rcx), %r11
movq 0x80(%rcx), %rbx
movq 0x88(%rcx), %rbp
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rbx, %rax
orq %rbp, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
movq 0x150(%rsp), %rsi
movq 0x60(%rsi), %r12
movq 0x68(%rsi), %r13
movq 0x70(%rsi), %r14
movq 0x78(%rsi), %r15
movq 0x80(%rsi), %rdx
movq 0x88(%rsi), %rcx
cmoveq %r12, %r8
cmoveq %r13, %r9
cmoveq %r14, %r10
cmoveq %r15, %r11
cmoveq %rdx, %rbx
cmoveq %rcx, %rbp
orq %r13, %r12
orq %r15, %r14
orq %rcx, %rdx
orq %r14, %r12
orq %r12, %rdx
negq %rdx
sbbq %rdx, %rdx
cmpq %rdx, %rax
cmoveq 0xf0(%rsp), %r8
cmoveq 0xf8(%rsp), %r9
cmoveq 0x100(%rsp), %r10
cmoveq 0x108(%rsp), %r11
cmoveq 0x110(%rsp), %rbx
cmoveq 0x118(%rsp), %rbp
movq %r8, 0xf0(%rsp)
movq %r9, 0xf8(%rsp)
movq %r10, 0x100(%rsp)
movq %r11, 0x108(%rsp)
movq %rbx, 0x110(%rsp)
movq %rbp, 0x118(%rsp)
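
// Final multiplexing and writeback: the flags from the mask comparison
// above pick, coordinate by coordinate, either the computed Jacobian sum or
// a verbatim copy of one input (when the other was the point at infinity),
// and all 18 result words are stored at the destination (%rdi).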
movq 0x158(%rsp), %rcx
movq 0x150(%rsp), %rsi
movq (%rsp), %r8
cmovbq (%rsi), %r8
cmova (%rcx), %r8
movq 0x8(%rsp), %r9
cmovbq 0x8(%rsi), %r9
cmova 0x8(%rcx), %r9
movq 0x10(%rsp), %r10
cmovbq 0x10(%rsi), %r10
cmova 0x10(%rcx), %r10
movq 0x18(%rsp), %r11
cmovbq 0x18(%rsi), %r11
cmova 0x18(%rcx), %r11
movq 0x20(%rsp), %rbx
cmovbq 0x20(%rsi), %rbx
cmova 0x20(%rcx), %rbx
movq 0x28(%rsp), %rbp
cmovbq 0x28(%rsi), %rbp
cmova 0x28(%rcx), %rbp
movq 0xc0(%rsp), %r12
cmovbq 0x30(%rsi), %r12
cmova 0x30(%rcx), %r12
movq 0xc8(%rsp), %r13
cmovbq 0x38(%rsi), %r13
cmova 0x38(%rcx), %r13
movq 0xd0(%rsp), %r14
cmovbq 0x40(%rsi), %r14
cmova 0x40(%rcx), %r14
movq 0xd8(%rsp), %r15
cmovbq 0x48(%rsi), %r15
cmova 0x48(%rcx), %r15
movq 0xe0(%rsp), %rdx
cmovbq 0x50(%rsi), %rdx
cmova 0x50(%rcx), %rdx
movq 0xe8(%rsp), %rax
cmovbq 0x58(%rsi), %rax
cmova 0x58(%rcx), %rax
movq %r8, (%rdi)
movq %r9, 0x8(%rdi)
movq %r10, 0x10(%rdi)
movq %r11, 0x18(%rdi)
movq %rbx, 0x20(%rdi)
movq %rbp, 0x28(%rdi)
movq 0xf0(%rsp), %r8
movq 0xf8(%rsp), %r9
movq 0x100(%rsp), %r10
movq 0x108(%rsp), %r11
movq 0x110(%rsp), %rbx
movq 0x118(%rsp), %rbp
movq %r12, 0x30(%rdi)
movq %r13, 0x38(%rdi)
movq %r14, 0x40(%rdi)
movq %r15, 0x48(%rdi)
movq %rdx, 0x50(%rdi)
movq %rax, 0x58(%rdi)
movq %r8, 0x60(%rdi)
movq %r9, 0x68(%rdi)
movq %r10, 0x70(%rdi)
movq %r11, 0x78(%rdi)
movq %rbx, 0x80(%rdi)
movq %rbp, 0x88(%rdi)
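
// Restore the stack frame and callee-saved registers and return.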
CFI_INC_RSP(352)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjadd)
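
// ----------------------------------------------------------------------------
// Local Montgomery-Jacobian point doubling subroutine for P-384
// (%rdi = output point, %rsi = input point, each a Montgomery-form
// (x',y',z') triple of 6-word coordinates).
// ----------------------------------------------------------------------------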
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjdouble)
Lp384_montjscalarmul_alt_p384_montjdouble:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(344)
movq %rdi, 0x150(%rsp)
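
// Montgomery squaring of the input z coordinate (0x60(%rsi)..0x88(%rsi)),
// giving the z^2 needed by the doubling formulas; same cross-then-diagonal
// pattern as the squarings above.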
movq 0x60(%rsi), %rbx
movq 0x68(%rsi), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x78(%rsi), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x88(%rsi), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x78(%rsi), %rax
mulq 0x80(%rsi)
movq %rax, %r15
movq %rdx, %rcx
movq 0x70(%rsi), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x68(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0x68(%rsi), %rbx
movq 0x78(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x80(%rsi), %rbx
movq 0x60(%rsi), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x70(%rsi), %rbx
movq 0x78(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x78(%rsi), %rax
mulq 0x88(%rsi)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x80(%rsi), %rax
mulq 0x88(%rsi)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0x60(%rsi), %rax
mulq %rax
movq %r8, (%rsp)
movq %rax, %r8
movq 0x68(%rsi), %rax
movq %rbp, 0x8(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x70(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x78(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x80(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x88(%rsi), %rax
mulq %rax
negq %rbp
adcq 0x8(%rsp), %rax
adcq (%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rdi
movq %rbx, (%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq (%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, (%rsp)
movq %r15, 0x8(%rsp)
movq %rcx, 0x10(%rsp)
movq %rbx, 0x18(%rsp)
movq %rbp, 0x20(%rsp)
movq %rdi, 0x28(%rsp)
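// y2 := y^2, the Montgomery square of the y coordinate at 0x30(%rsi),
// written to 0x30(%rsp)...0x58(%rsp)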
movq 0x30(%rsi), %rbx
movq 0x38(%rsi), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x48(%rsi), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x58(%rsi), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x48(%rsi), %rax
mulq 0x50(%rsi)
movq %rax, %r15
movq %rdx, %rcx
movq 0x40(%rsi), %rbx
movq 0x30(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x38(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0x38(%rsi), %rbx
movq 0x48(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x50(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x58(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x50(%rsi), %rbx
movq 0x30(%rsi), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x40(%rsi), %rbx
movq 0x48(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x50(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x58(%rsi), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x48(%rsi), %rax
mulq 0x58(%rsi)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x50(%rsi), %rax
mulq 0x58(%rsi)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0x30(%rsi), %rax
mulq %rax
movq %r8, 0x30(%rsp)
movq %rax, %r8
movq 0x38(%rsi), %rax
movq %rbp, 0x38(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x40(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x48(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x50(%rsi), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x58(%rsi), %rax
mulq %rax
negq %rbp
adcq 0x38(%rsp), %rax
adcq 0x30(%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rdi
movq %rbx, 0x30(%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq 0x30(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq %rcx, 0x40(%rsp)
movq %rbx, 0x48(%rsp)
movq %rbp, 0x50(%rsp)
movq %rdi, 0x58(%rsp)
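// t1 := x + z2 at 0xf0(%rsp); the sum is corrected only on overflow of
// 2^384, so it may be not fully reduced, which the multiplication below
// tolerates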
movq (%rsi), %rax
addq (%rsp), %rax
movq 0x8(%rsi), %rcx
adcq 0x8(%rsp), %rcx
movq 0x10(%rsi), %r8
adcq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
adcq 0x18(%rsp), %r9
movq 0x20(%rsi), %r10
adcq 0x20(%rsp), %r10
movq 0x28(%rsi), %r11
adcq 0x28(%rsp), %r11
sbbq %rdx, %rdx
movl $0x1, %ebx
andq %rdx, %rbx
movl $0xffffffff, %ebp
andq %rbp, %rdx
xorq %rbp, %rbp
subq %rdx, %rbp
addq %rbp, %rax
movq %rax, 0xf0(%rsp)
adcq %rdx, %rcx
movq %rcx, 0xf8(%rsp)
adcq %rbx, %r8
movq %r8, 0x100(%rsp)
adcq $0x0, %r9
movq %r9, 0x108(%rsp)
adcq $0x0, %r10
movq %r10, 0x110(%rsp)
adcq $0x0, %r11
movq %r11, 0x118(%rsp)
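// t2 := x - z2 (mod p_384) at 0xc0(%rsp)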
movq (%rsi), %rax
subq (%rsp), %rax
movq 0x8(%rsi), %rdx
sbbq 0x8(%rsp), %rdx
movq 0x10(%rsi), %r8
sbbq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
sbbq 0x18(%rsp), %r9
movq 0x20(%rsi), %r10
sbbq 0x20(%rsp), %r10
movq 0x28(%rsi), %r11
sbbq 0x28(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %ebx
andq %rbx, %rcx
xorq %rbx, %rbx
subq %rcx, %rbx
subq %rbx, %rax
movq %rax, 0xc0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xc8(%rsp)
sbbq %rax, %rax
andq %rbx, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xd0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xd8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xe0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xe8(%rsp)
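// x2p := t2 * t1 = (x - z^2) * (x + z^2) = x^2 - z^4, a Montgomery
// product written to 0x60(%rsp)...0x88(%rsp)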
movq 0xc0(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x100(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x108(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x110(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x118(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0xc8(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x100(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x108(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x110(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x118(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0xd0(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x100(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x108(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x110(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x118(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0xd8(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x100(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x108(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x110(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x118(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0xe0(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x100(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x108(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x110(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x118(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0xe8(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0xf8(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x100(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x108(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x110(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x118(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x60(%rsp)
movq %r15, 0x68(%rsp)
movq %r8, 0x70(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x80(%rsp)
movq %r11, 0x88(%rsp)
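// t1 := y + z (mod p_384) at 0xf0(%rsp)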
movq 0x30(%rsi), %rax
addq 0x60(%rsi), %rax
movq 0x38(%rsi), %rcx
adcq 0x68(%rsi), %rcx
movq 0x40(%rsi), %r8
adcq 0x70(%rsi), %r8
movq 0x48(%rsi), %r9
adcq 0x78(%rsi), %r9
movq 0x50(%rsi), %r10
adcq 0x80(%rsi), %r10
movq 0x58(%rsi), %r11
adcq 0x88(%rsi), %r11
movl $0x0, %edx
adcq %rdx, %rdx
movabsq $0xffffffff00000001, %rbp
addq %rbp, %rax
movl $0xffffffff, %ebp
adcq %rbp, %rcx
adcq $0x1, %r8
adcq $0x0, %r9
adcq $0x0, %r10
adcq $0x0, %r11
adcq $0xffffffffffffffff, %rdx
movl $0x1, %ebx
andq %rdx, %rbx
andq %rbp, %rdx
xorq %rbp, %rbp
subq %rdx, %rbp
subq %rbp, %rax
movq %rax, 0xf0(%rsp)
sbbq %rdx, %rcx
movq %rcx, 0xf8(%rsp)
sbbq %rbx, %r8
movq %r8, 0x100(%rsp)
sbbq $0x0, %r9
movq %r9, 0x108(%rsp)
sbbq $0x0, %r10
movq %r10, 0x110(%rsp)
sbbq $0x0, %r11
movq %r11, 0x118(%rsp)
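// x4p := x2p^2, Montgomery square written to 0x120(%rsp)...0x148(%rsp)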
movq 0x60(%rsp), %rbx
movq 0x68(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x78(%rsp), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x88(%rsp), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x78(%rsp), %rax
mulq 0x80(%rsp)
movq %rax, %r15
movq %rdx, %rcx
movq 0x70(%rsp), %rbx
movq 0x60(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x68(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0x68(%rsp), %rbx
movq 0x78(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x80(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x88(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x80(%rsp), %rbx
movq 0x60(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x70(%rsp), %rbx
movq 0x78(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x80(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x88(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x78(%rsp), %rax
mulq 0x88(%rsp)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x80(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0x60(%rsp), %rax
mulq %rax
movq %r8, 0x120(%rsp)
movq %rax, %r8
movq 0x68(%rsp), %rax
movq %rbp, 0x128(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x70(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x78(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x80(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x88(%rsp), %rax
mulq %rax
negq %rbp
adcq 0x128(%rsp), %rax
adcq 0x120(%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rdi
movq %rbx, 0x120(%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq 0x120(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0x120(%rsp)
movq %r15, 0x128(%rsp)
movq %rcx, 0x130(%rsp)
movq %rbx, 0x138(%rsp)
movq %rbp, 0x140(%rsp)
movq %rdi, 0x148(%rsp)
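// xy2 := x * y2, Montgomery product written to 0x90(%rsp)...0xb8(%rsp)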
movq 0x30(%rsp), %rbx
movq (%rsi), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x8(%rsi), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x10(%rsi), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x18(%rsi), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x20(%rsi), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x28(%rsi), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x38(%rsp), %rbx
movq (%rsi), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x8(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x10(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x18(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x20(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x28(%rsi), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x40(%rsp), %rbx
movq (%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x8(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x10(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x18(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x20(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x28(%rsi), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x48(%rsp), %rbx
movq (%rsi), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x8(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x10(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x18(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x20(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x28(%rsi), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x50(%rsp), %rbx
movq (%rsi), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x8(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x10(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x18(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x20(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x28(%rsi), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x58(%rsp), %rbx
movq (%rsi), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x8(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x10(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x18(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x20(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x28(%rsi), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movq %r9, 0xa8(%rsp)
movq %r10, 0xb0(%rsp)
movq %r11, 0xb8(%rsp)
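// t2 := t1^2 = (y + z)^2, Montgomery square written to 0xc0(%rsp)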
movq 0xf0(%rsp), %rbx
movq 0xf8(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x108(%rsp), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x118(%rsp), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x108(%rsp), %rax
mulq 0x110(%rsp)
movq %rax, %r15
movq %rdx, %rcx
movq 0x100(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0xf8(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0xf8(%rsp), %rbx
movq 0x108(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x110(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x118(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x110(%rsp), %rbx
movq 0xf0(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x100(%rsp), %rbx
movq 0x108(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x110(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x118(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x108(%rsp), %rax
mulq 0x118(%rsp)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x110(%rsp), %rax
mulq 0x118(%rsp)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0xf0(%rsp), %rax
mulq %rax
movq %r8, 0xc0(%rsp)
movq %rax, %r8
movq 0xf8(%rsp), %rax
movq %rbp, 0xc8(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x100(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x108(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x110(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x118(%rsp), %rax
mulq %rax
negq %rbp
adcq 0xc8(%rsp), %rax
adcq 0xc0(%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rdi
movq %rbx, 0xc0(%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq 0xc0(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
movq %rbx, 0xd8(%rsp)
movq %rbp, 0xe0(%rsp)
movq %rdi, 0xe8(%rsp)
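// d := 12 * xy2 - 9 * x4p (mod p_384), computed without underflow as
// 12 * xy2 + 9 * (p_384 - x4p) and written to 0x120(%rsp)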
movabsq $0xffffffff, %r9
subq 0x120(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq 0x128(%rsp), %r10
movq $0xfffffffffffffffe, %r11
sbbq 0x130(%rsp), %r11
movq $0xffffffffffffffff, %r12
sbbq 0x138(%rsp), %r12
movq $0xffffffffffffffff, %r13
sbbq 0x140(%rsp), %r13
movq $0xffffffffffffffff, %r14
sbbq 0x148(%rsp), %r14
movq $0x9, %rcx
movq %r9, %rax
mulq %rcx
movq %rax, %r8
movq %rdx, %r9
movq %r10, %rax
xorl %r10d, %r10d
mulq %rcx
addq %rax, %r9
adcq %rdx, %r10
movq %r11, %rax
xorl %r11d, %r11d
mulq %rcx
addq %rax, %r10
adcq %rdx, %r11
movq %r12, %rax
xorl %r12d, %r12d
mulq %rcx
addq %rax, %r11
adcq %rdx, %r12
movq %r13, %rax
xorl %r13d, %r13d
mulq %rcx
addq %rax, %r12
adcq %rdx, %r13
movq %r14, %rax
movl $0x1, %r14d
mulq %rcx
addq %rax, %r13
adcq %rdx, %r14
movl $0xc, %ecx
movq 0x90(%rsp), %rax
mulq %rcx
addq %rax, %r8
adcq %rdx, %r9
sbbq %rbx, %rbx
movq 0x98(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %rbx, %rbx
movq 0xa0(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbx, %rbx
movq 0xa8(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbx, %rbx
movq 0xb0(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbx, %rbx
movq 0xb8(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r13
adcq %rdx, %r14
movabsq $0xffffffff00000001, %rax
mulq %r14
addq %rax, %r8
adcq %rdx, %r9
adcq %r14, %r10
movq %r14, %rax
sbbq %rcx, %rcx
movl $0xffffffff, %edx
negq %rcx
mulq %rdx
addq %rax, %r9
adcq %rdx, %r10
adcq %rcx, %r11
adcq $0x0, %r12
adcq $0x0, %r13
sbbq %rcx, %rcx
notq %rcx
movl $0xffffffff, %edx
xorq %rax, %rax
andq %rcx, %rdx
subq %rdx, %rax
andq $0x1, %rcx
subq %rax, %r8
movq %r8, 0x120(%rsp)
sbbq %rdx, %r9
movq %r9, 0x128(%rsp)
sbbq %rcx, %r10
movq %r10, 0x130(%rsp)
sbbq $0x0, %r11
movq %r11, 0x138(%rsp)
sbbq $0x0, %r12
movq %r12, 0x140(%rsp)
sbbq $0x0, %r13
movq %r13, 0x148(%rsp)
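// t1 := t2 - z2 (mod p_384) at 0xf0(%rsp)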
movq 0xc0(%rsp), %rax
subq (%rsp), %rax
movq 0xc8(%rsp), %rdx
sbbq 0x8(%rsp), %rdx
movq 0xd0(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0xd8(%rsp), %r9
sbbq 0x18(%rsp), %r9
movq 0xe0(%rsp), %r10
sbbq 0x20(%rsp), %r10
movq 0xe8(%rsp), %r11
sbbq 0x28(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %ebx
andq %rbx, %rcx
xorq %rbx, %rbx
subq %rcx, %rbx
subq %rbx, %rax
movq %rax, 0xf0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xf8(%rsp)
sbbq %rax, %rax
andq %rbx, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x100(%rsp)
sbbq $0x0, %r9
movq %r9, 0x108(%rsp)
sbbq $0x0, %r10
movq %r10, 0x110(%rsp)
sbbq $0x0, %r11
movq %r11, 0x118(%rsp)
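// y4 := y2^2 = y^4, Montgomery square written to 0xc0(%rsp)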
movq 0x30(%rsp), %rbx
movq 0x38(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x48(%rsp), %rax
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x58(%rsp), %rax
mulq %rbx
movq %rax, %r13
movq %rdx, %r14
movq 0x48(%rsp), %rax
mulq 0x50(%rsp)
movq %rax, %r15
movq %rdx, %rcx
movq 0x40(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x38(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbp, %rbp
movq 0x38(%rsp), %rbx
movq 0x48(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x50(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x58(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
adcq $0x0, %rcx
movq 0x50(%rsp), %rbx
movq 0x30(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x40(%rsp), %rbx
movq 0x48(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %rbp, %rbp
movq 0x50(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x58(%rsp), %rax
mulq %rbx
subq %rbp, %rdx
addq %rax, %r15
adcq %rdx, %rcx
sbbq %rbp, %rbp
xorl %ebx, %ebx
movq 0x48(%rsp), %rax
mulq 0x58(%rsp)
subq %rbp, %rdx
xorl %ebp, %ebp
addq %rax, %rcx
adcq %rdx, %rbx
adcl %ebp, %ebp
movq 0x50(%rsp), %rax
mulq 0x58(%rsp)
addq %rax, %rbx
adcq %rdx, %rbp
xorl %r8d, %r8d
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %r15, %r15
adcq %rcx, %rcx
adcq %rbx, %rbx
adcq %rbp, %rbp
adcl %r8d, %r8d
movq 0x30(%rsp), %rax
mulq %rax
movq %r8, 0xc0(%rsp)
movq %rax, %r8
movq 0x38(%rsp), %rax
movq %rbp, 0xc8(%rsp)
addq %rdx, %r9
sbbq %rbp, %rbp
mulq %rax
negq %rbp
adcq %rax, %r10
adcq %rdx, %r11
sbbq %rbp, %rbp
movq 0x40(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r12
adcq %rdx, %r13
sbbq %rbp, %rbp
movq 0x48(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %r14
adcq %rdx, %r15
sbbq %rbp, %rbp
movq 0x50(%rsp), %rax
mulq %rax
negq %rbp
adcq %rax, %rcx
adcq %rdx, %rbx
sbbq %rbp, %rbp
movq 0x58(%rsp), %rax
mulq %rax
negq %rbp
adcq 0xc8(%rsp), %rax
adcq 0xc0(%rsp), %rdx
movq %rax, %rbp
movq %rdx, %rdi
movq %rbx, 0xc0(%rsp)
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r8
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r8, %r9
sbbq %rdx, %r10
sbbq %rax, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rbx, %r8
sbbq $0x0, %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r9
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r9, %r10
sbbq %rdx, %r11
sbbq %rax, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rbx, %r9
sbbq $0x0, %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r10
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r10, %r11
sbbq %rdx, %r12
sbbq %rax, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rbx, %r10
sbbq $0x0, %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r11
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r11, %r12
sbbq %rdx, %r13
sbbq %rax, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rbx, %r11
sbbq $0x0, %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r12
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r12, %r13
sbbq %rdx, %r8
sbbq %rax, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rbx, %r12
sbbq $0x0, %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %rax, %r13
movl $0x0, %eax
adcq %rbx, %rdx
adcl %eax, %eax
subq %r13, %r8
sbbq %rdx, %r9
sbbq %rax, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rbx, %r13
sbbq $0x0, %r13
movq 0xc0(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
movq %rbx, 0xd8(%rsp)
movq %rbp, 0xe0(%rsp)
movq %rdi, 0xe8(%rsp)
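// Reload the result pointer (%rdi was clobbered by the squarings), then
// z_3 := t1 - y2 = (y + z)^2 - z^2 - y^2 = 2 * y * z, written straight
// to the z coordinate of the result at 0x60(%rdi)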
movq 0x150(%rsp), %rdi
movq 0xf0(%rsp), %rax
subq 0x30(%rsp), %rax
movq 0xf8(%rsp), %rdx
sbbq 0x38(%rsp), %rdx
movq 0x100(%rsp), %r8
sbbq 0x40(%rsp), %r8
movq 0x108(%rsp), %r9
sbbq 0x48(%rsp), %r9
movq 0x110(%rsp), %r10
sbbq 0x50(%rsp), %r10
movq 0x118(%rsp), %r11
sbbq 0x58(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %ebx
andq %rbx, %rcx
xorq %rbx, %rbx
subq %rcx, %rbx
subq %rbx, %rax
movq %rax, 0x60(%rdi)
sbbq %rcx, %rdx
movq %rdx, 0x68(%rdi)
sbbq %rax, %rax
andq %rbx, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x70(%rdi)
sbbq $0x0, %r9
movq %r9, 0x78(%rdi)
sbbq $0x0, %r10
movq %r10, 0x80(%rdi)
sbbq $0x0, %r11
movq %r11, 0x88(%rdi)
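// dx2 := d * x2p, Montgomery product written to 0xf0(%rsp)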
movq 0x60(%rsp), %rbx
movq 0x120(%rsp), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 0x128(%rsp), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 0x130(%rsp), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 0x138(%rsp), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 0x140(%rsp), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 0x148(%rsp), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq %r8, %rbx
shlq $0x20, %rbx
addq %r8, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r8
movabsq $0xffffffff, %rax
mulq %rbx
addq %r8, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rdx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rbx
addq %rbx, %r14
adcq $0x0, %r15
movq 0x68(%rsp), %rbx
movq 0x120(%rsp), %rax
mulq %rbx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r8, %r8
movq 0x128(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r8, %r8
movq 0x130(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r8, %r8
movq 0x138(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r8, %r8
movq 0x140(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r8, %r8
movq 0x148(%rsp), %rax
mulq %rbx
subq %r8, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r8, %r8
negq %r8
movq %r9, %rbx
shlq $0x20, %rbx
addq %r9, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r9
movabsq $0xffffffff, %rax
mulq %rbx
addq %r9, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rdx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rbx
addq %rbx, %r15
adcq $0x0, %r8
movq 0x70(%rsp), %rbx
movq 0x120(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r9, %r9
movq 0x128(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r9, %r9
movq 0x130(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r9, %r9
movq 0x138(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r9, %r9
movq 0x140(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r9, %r9
movq 0x148(%rsp), %rax
mulq %rbx
subq %r9, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r9, %r9
negq %r9
movq %r10, %rbx
shlq $0x20, %rbx
addq %r10, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r10
movabsq $0xffffffff, %rax
mulq %rbx
addq %r10, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rdx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rbx
addq %rbx, %r8
adcq $0x0, %r9
movq 0x78(%rsp), %rbx
movq 0x120(%rsp), %rax
mulq %rbx
addq %rax, %r11
adcq %rdx, %r12
sbbq %r10, %r10
movq 0x128(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r10, %r10
movq 0x130(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r10, %r10
movq 0x138(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r10, %r10
movq 0x140(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r10, %r10
movq 0x148(%rsp), %rax
mulq %rbx
subq %r10, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r10, %r10
negq %r10
movq %r11, %rbx
shlq $0x20, %rbx
addq %r11, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r11
movabsq $0xffffffff, %rax
mulq %rbx
addq %r11, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rdx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rbx
addq %rbx, %r9
adcq $0x0, %r10
movq 0x80(%rsp), %rbx
movq 0x120(%rsp), %rax
mulq %rbx
addq %rax, %r12
adcq %rdx, %r13
sbbq %r11, %r11
movq 0x128(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r11, %r11
movq 0x130(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r11, %r11
movq 0x138(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r11, %r11
movq 0x140(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r11, %r11
movq 0x148(%rsp), %rax
mulq %rbx
subq %r11, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r11, %r11
negq %r11
movq %r12, %rbx
shlq $0x20, %rbx
addq %r12, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r12
movabsq $0xffffffff, %rax
mulq %rbx
addq %r12, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rdx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rbx
addq %rbx, %r10
adcq $0x0, %r11
movq 0x88(%rsp), %rbx
movq 0x120(%rsp), %rax
mulq %rbx
addq %rax, %r13
adcq %rdx, %r14
sbbq %r12, %r12
movq 0x128(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r14
adcq %rdx, %r15
sbbq %r12, %r12
movq 0x130(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r15
adcq %rdx, %r8
sbbq %r12, %r12
movq 0x138(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r8
adcq %rdx, %r9
sbbq %r12, %r12
movq 0x140(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %r12, %r12
movq 0x148(%rsp), %rax
mulq %rbx
subq %r12, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %r12, %r12
negq %r12
movq %r13, %rbx
shlq $0x20, %rbx
addq %r13, %rbx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulq %rbx
movq %rdx, %r13
movabsq $0xffffffff, %rax
mulq %rbx
addq %r13, %rax
adcq %rbx, %rdx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rdx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rbx
addq %rbx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0x108(%rsp)
movq %r10, 0x110(%rsp)
movq %r11, 0x118(%rsp)
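// x_3 := 4 * xy2 - d (mod p_384), written to the x coordinate at (%rdi)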
movq 0xb8(%rsp), %rcx
movq %rcx, %r13
shrq $0x3e, %rcx
movq 0xb0(%rsp), %r12
shldq $0x2, %r12, %r13
movq 0xa8(%rsp), %r11
shldq $0x2, %r11, %r12
movq 0xa0(%rsp), %r10
shldq $0x2, %r10, %r11
movq 0x98(%rsp), %r9
shldq $0x2, %r9, %r10
movq 0x90(%rsp), %r8
shldq $0x2, %r8, %r9
shlq $0x2, %r8
addq $0x1, %rcx
subq 0x120(%rsp), %r8
sbbq 0x128(%rsp), %r9
sbbq 0x130(%rsp), %r10
sbbq 0x138(%rsp), %r11
sbbq 0x140(%rsp), %r12
sbbq 0x148(%rsp), %r13
sbbq $0x0, %rcx
movabsq $0xffffffff00000001, %rax
mulq %rcx
addq %rax, %r8
adcq %rdx, %r9
adcq %rcx, %r10
movq %rcx, %rax
sbbq %rcx, %rcx
movl $0xffffffff, %edx
negq %rcx
mulq %rdx
addq %rax, %r9
adcq %rdx, %r10
adcq %rcx, %r11
adcq $0x0, %r12
adcq $0x0, %r13
sbbq %rcx, %rcx
notq %rcx
movl $0xffffffff, %edx
xorq %rax, %rax
andq %rcx, %rdx
subq %rdx, %rax
andq $0x1, %rcx
subq %rax, %r8
movq %r8, (%rdi)
sbbq %rdx, %r9
movq %r9, 0x8(%rdi)
sbbq %rcx, %r10
movq %r10, 0x10(%rdi)
sbbq $0x0, %r11
movq %r11, 0x18(%rdi)
sbbq $0x0, %r12
movq %r12, 0x20(%rdi)
sbbq $0x0, %r13
movq %r13, 0x28(%rdi)
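// y_3 := 3 * dx2 - 8 * y4 (mod p_384), computed without underflow as
// 3 * dx2 + 8 * (p_384 - y4) and written to 0x30(%rdi)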
movabsq $0xffffffff, %r8
subq 0xc0(%rsp), %r8
movabsq $0xffffffff00000000, %r9
sbbq 0xc8(%rsp), %r9
movq $0xfffffffffffffffe, %r10
sbbq 0xd0(%rsp), %r10
movq $0xffffffffffffffff, %r11
sbbq 0xd8(%rsp), %r11
movq $0xffffffffffffffff, %r12
sbbq 0xe0(%rsp), %r12
movq $0xffffffffffffffff, %r13
sbbq 0xe8(%rsp), %r13
movq %r13, %r14
shrq $0x3d, %r14
shldq $0x3, %r12, %r13
shldq $0x3, %r11, %r12
shldq $0x3, %r10, %r11
shldq $0x3, %r9, %r10
shldq $0x3, %r8, %r9
shlq $0x3, %r8
addq $0x1, %r14
movl $0x3, %ecx
movq 0xf0(%rsp), %rax
mulq %rcx
addq %rax, %r8
adcq %rdx, %r9
sbbq %rbx, %rbx
movq 0xf8(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %rbx, %rbx
movq 0x100(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbx, %rbx
movq 0x108(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rbx, %rbx
movq 0x110(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r12
adcq %rdx, %r13
sbbq %rbx, %rbx
movq 0x118(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r13
adcq %rdx, %r14
movabsq $0xffffffff00000001, %rax
mulq %r14
addq %rax, %r8
adcq %rdx, %r9
adcq %r14, %r10
movq %r14, %rax
sbbq %rcx, %rcx
movl $0xffffffff, %edx
negq %rcx
mulq %rdx
addq %rax, %r9
adcq %rdx, %r10
adcq %rcx, %r11
adcq $0x0, %r12
adcq $0x0, %r13
sbbq %rcx, %rcx
notq %rcx
movl $0xffffffff, %edx
xorq %rax, %rax
andq %rcx, %rdx
subq %rdx, %rax
andq $0x1, %rcx
subq %rax, %r8
movq %r8, 0x30(%rdi)
sbbq %rdx, %r9
movq %r9, 0x38(%rdi)
sbbq %rcx, %r10
movq %r10, 0x40(%rdi)
sbbq $0x0, %r11
movq %r11, 0x48(%rdi)
sbbq $0x0, %r12
movq %r12, 0x50(%rdi)
sbbq $0x0, %r13
movq %r13, 0x58(%rdi)
CFI_INC_RSP(344)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjadd(uint64_t p3[static 18], const uint64_t p1[static 18],
// const uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
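//
// A minimal caller-side sketch (hypothetical code, not part of this file;
// it assumes every coordinate was already mapped to the Montgomery domain,
// e.g. with s2n-bignum's bignum_tomont_p384):
//
//     uint64_t p1[18], p2[18], p3[18]; /* (x',y',z') triples, 6 words each */
//     /* ... fill p1 and p2 with Montgomery-form Jacobian coordinates ... */
//     p384_montjadd(p3, p1, p2);       /* p3 := p1 + p2 on P-384 */
//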
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rcx = p2,
// which needs to be set up explicitly before use.
// The %rdi value never changes, however.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rcx)
#define y_2 NUMSIZE(%rcx)
#define z_2 (2*NUMSIZE)(%rcx)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// In one place it's convenient to use another register
// since the squaring function overwrites %rcx
#define z_2_alt (2*NUMSIZE)(%rsi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define x1a (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define z2sq (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define y1a (NUMSIZE*6)(%rsp)
// Temporaries for the actual input pointers
#define input_x (NUMSIZE*7)(%rsp)
#define input_y (NUMSIZE*7+8)(%rsp)
#define NSPACE 352
// Corresponds exactly to bignum_montmul_p384
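// The interleaved reduction relies on p_384 == 2^32 - 1 (mod 2^64) and
// (2^32 - 1) * (2^32 + 1) = 2^64 - 1 == -1 (mod 2^64), so the negated
// inverse -p_384^-1 (mod 2^64) is 2^32 + 1. Each round takes the low word
// d0 and forms w = d0 * (2^32 + 1) mod 2^64 (the shlq $0x20 and addq pair
// below), which makes d0 + w * p_384 divisible by 2^64.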
#define montmul_p384(P0,P1,P2) \
movq P2, %rdx ; \
xorl %r15d, %r15d ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rbx, %r10 ; \
addq %rbx, %r9 ; \
mulxq 0x10+P1, %rbx, %r11 ; \
adcq %rbx, %r10 ; \
mulxq 0x18+P1, %rbx, %r12 ; \
adcq %rbx, %r11 ; \
mulxq 0x20+P1, %rbx, %r13 ; \
adcq %rbx, %r12 ; \
mulxq 0x28+P1, %rbx, %r14 ; \
adcq %rbx, %r13 ; \
adcq %r15, %r14 ; \
movq %r8, %rdx ; \
shlq $0x20, %rdx ; \
addq %r8, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r8, %rbx ; \
adcq %r8, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq %rbp, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x8+P2, %rdx ; \
xorl %r8d, %r8d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
adoxq %r8, %r15 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r14 ; \
adcq %rbx, %r15 ; \
adcq %r8, %r8 ; \
movq %r9, %rdx ; \
shlq $0x20, %rdx ; \
addq %r9, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r9, %rbx ; \
adcq %r9, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r10 ; \
sbbq %rbx, %r11 ; \
sbbq %rbp, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P2, %rdx ; \
xorl %r9d, %r9d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
adoxq %r9, %r8 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r15 ; \
adcq %rbx, %r8 ; \
adcq %r9, %r9 ; \
movq %r10, %rdx ; \
shlq $0x20, %rdx ; \
addq %r10, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r10, %rbx ; \
adcq %r10, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r11 ; \
sbbq %rbx, %r12 ; \
sbbq %rbp, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P2, %rdx ; \
xorl %r10d, %r10d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
adoxq %r10, %r9 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r8 ; \
adcq %rbx, %r9 ; \
adcq %r10, %r10 ; \
movq %r11, %rdx ; \
shlq $0x20, %rdx ; \
addq %r11, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r11, %rbx ; \
adcq %r11, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r12 ; \
sbbq %rbx, %r13 ; \
sbbq %rbp, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P2, %rdx ; \
xorl %r11d, %r11d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
adoxq %r11, %r10 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r9 ; \
adcq %rbx, %r10 ; \
adcq %r11, %r11 ; \
movq %r12, %rdx ; \
shlq $0x20, %rdx ; \
addq %r12, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r12, %rbx ; \
adcq %r12, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r13 ; \
sbbq %rbx, %r14 ; \
sbbq %rbp, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P2, %rdx ; \
xorl %r12d, %r12d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
adoxq %r12, %r11 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r10 ; \
adcq %rbx, %r11 ; \
adcq %r12, %r12 ; \
movq %r13, %rdx ; \
shlq $0x20, %rdx ; \
addq %r13, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r13, %rbx ; \
adcq %r13, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r14 ; \
sbbq %rbx, %r15 ; \
sbbq %rbp, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorl %edx, %edx ; \
xorl %ebp, %ebp ; \
xorl %r13d, %r13d ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %ebx ; \
adcq %r15, %rbx ; \
movl $0x1, %ecx ; \
adcq %r8, %rcx ; \
adcq %r9, %rdx ; \
adcq %r10, %rbp ; \
adcq %r11, %r13 ; \
adcq $0x0, %r12 ; \
cmovne %rax, %r14 ; \
cmovne %rbx, %r15 ; \
cmovne %rcx, %r8 ; \
cmovne %rdx, %r9 ; \
cmovne %rbp, %r10 ; \
cmovne %r13, %r11 ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %r8, 0x10+P0 ; \
movq %r9, 0x18+P0 ; \
movq %r10, 0x20+P0 ; \
movq %r11, 0x28+P0
// Corresponds exactly to bignum_montsqr_p384
#define montsqr_p384(P0,P1) \
movq P1, %rdx ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
mulxq 0x28+P1, %r13, %r14 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x20+P1, %r15, %rcx ; \
xorl %ebp, %ebp ; \
movq 0x10+P1, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x8+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
adcxq %rbp, %r15 ; \
adoxq %rbp, %rcx ; \
adcq %rbp, %rcx ; \
xorl %ebp, %ebp ; \
movq 0x20+P1, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x28+P1, %rax, %rdx ; \
adcxq %rax, %r15 ; \
adoxq %rdx, %rcx ; \
movq 0x28+P1, %rdx ; \
mulxq 0x20+P1, %rbx, %rbp ; \
mulxq 0x18+P1, %rax, %rdx ; \
adcxq %rax, %rcx ; \
adoxq %rdx, %rbx ; \
movl $0x0, %eax ; \
adcxq %rax, %rbx ; \
adoxq %rax, %rbp ; \
adcq %rax, %rbp ; \
xorq %rax, %rax ; \
movq P1, %rdx ; \
mulxq P1, %r8, %rax ; \
adcxq %r9, %r9 ; \
adoxq %rax, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %r15, %r15 ; \
adoxq %rdx, %r15 ; \
movq 0x20+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %rcx, %rcx ; \
adoxq %rax, %rcx ; \
adcxq %rbx, %rbx ; \
adoxq %rdx, %rbx ; \
movq 0x28+P1, %rdx ; \
mulxq %rdx, %rax, %rsi ; \
adcxq %rbp, %rbp ; \
adoxq %rax, %rbp ; \
movl $0x0, %eax ; \
adcxq %rax, %rsi ; \
adoxq %rax, %rsi ; \
movq %rbx, P0 ; \
movq %r8, %rdx ; \
shlq $0x20, %rdx ; \
addq %r8, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r8, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r8 ; \
addq %rbx, %rax ; \
adcq %rdx, %r8 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r9 ; \
sbbq %r8, %r10 ; \
sbbq %rbx, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
movq %rdx, %r8 ; \
sbbq $0x0, %r8 ; \
movq %r9, %rdx ; \
shlq $0x20, %rdx ; \
addq %r9, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r9, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r9 ; \
addq %rbx, %rax ; \
adcq %rdx, %r9 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r10 ; \
sbbq %r9, %r11 ; \
sbbq %rbx, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r8 ; \
movq %rdx, %r9 ; \
sbbq $0x0, %r9 ; \
movq %r10, %rdx ; \
shlq $0x20, %rdx ; \
addq %r10, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r10, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r10 ; \
addq %rbx, %rax ; \
adcq %rdx, %r10 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r11 ; \
sbbq %r10, %r12 ; \
sbbq %rbx, %r13 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
movq %rdx, %r10 ; \
sbbq $0x0, %r10 ; \
movq %r11, %rdx ; \
shlq $0x20, %rdx ; \
addq %r11, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r11, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r11 ; \
addq %rbx, %rax ; \
adcq %rdx, %r11 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r12 ; \
sbbq %r11, %r13 ; \
sbbq %rbx, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
movq %rdx, %r11 ; \
sbbq $0x0, %r11 ; \
movq %r12, %rdx ; \
shlq $0x20, %rdx ; \
addq %r12, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r12, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r12 ; \
addq %rbx, %rax ; \
adcq %rdx, %r12 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r13 ; \
sbbq %r12, %r8 ; \
sbbq %rbx, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %r11 ; \
movq %rdx, %r12 ; \
sbbq $0x0, %r12 ; \
movq %r13, %rdx ; \
shlq $0x20, %rdx ; \
addq %r13, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r13, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r13 ; \
addq %rbx, %rax ; \
adcq %rdx, %r13 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r8 ; \
sbbq %r13, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq $0x0, %r11 ; \
sbbq $0x0, %r12 ; \
movq %rdx, %r13 ; \
sbbq $0x0, %r13 ; \
movq P0, %rbx ; \
addq %r8, %r14 ; \
adcq %r9, %r15 ; \
adcq %r10, %rcx ; \
adcq %r11, %rbx ; \
adcq %r12, %rbp ; \
adcq %r13, %rsi ; \
movl $0x0, %r8d ; \
adcq %r8, %r8 ; \
xorq %r11, %r11 ; \
xorq %r12, %r12 ; \
xorq %r13, %r13 ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %r9d ; \
adcq %r15, %r9 ; \
movl $0x1, %r10d ; \
adcq %rcx, %r10 ; \
adcq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq %rsi, %r13 ; \
adcq $0x0, %r8 ; \
cmovne %rax, %r14 ; \
cmovne %r9, %r15 ; \
cmovne %r10, %rcx ; \
cmovne %r11, %rbx ; \
cmovne %r12, %rbp ; \
cmovne %r13, %rsi ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %rcx, 0x10+P0 ; \
movq %rbx, 0x18+P0 ; \
movq %rbp, 0x20+P0 ; \
movq %rsi, 0x28+P0
// Almost-Montgomery variant, which we use when the result will only be an
// input to other multiplications whose other argument is fully reduced
// (which is always safe).
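// (The only difference from montsqr_p384 above is the final correction:
// here p_384 is subtracted only when the addition chain actually carries,
// so the output is reduced modulo 2^384 but may still be >= p_384, hence
// "almost-Montgomery".)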
#define amontsqr_p384(P0,P1) \
movq P1, %rdx ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
mulxq 0x28+P1, %r13, %r14 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x20+P1, %r15, %rcx ; \
xorl %ebp, %ebp ; \
movq 0x10+P1, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x8+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
adcxq %rbp, %r15 ; \
adoxq %rbp, %rcx ; \
adcq %rbp, %rcx ; \
xorl %ebp, %ebp ; \
movq 0x20+P1, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x28+P1, %rax, %rdx ; \
adcxq %rax, %r15 ; \
adoxq %rdx, %rcx ; \
movq 0x28+P1, %rdx ; \
mulxq 0x20+P1, %rbx, %rbp ; \
mulxq 0x18+P1, %rax, %rdx ; \
adcxq %rax, %rcx ; \
adoxq %rdx, %rbx ; \
movl $0x0, %eax ; \
adcxq %rax, %rbx ; \
adoxq %rax, %rbp ; \
adcq %rax, %rbp ; \
xorq %rax, %rax ; \
movq P1, %rdx ; \
mulxq P1, %r8, %rax ; \
adcxq %r9, %r9 ; \
adoxq %rax, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %r15, %r15 ; \
adoxq %rdx, %r15 ; \
movq 0x20+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %rcx, %rcx ; \
adoxq %rax, %rcx ; \
adcxq %rbx, %rbx ; \
adoxq %rdx, %rbx ; \
movq 0x28+P1, %rdx ; \
mulxq %rdx, %rax, %rsi ; \
adcxq %rbp, %rbp ; \
adoxq %rax, %rbp ; \
movl $0x0, %eax ; \
adcxq %rax, %rsi ; \
adoxq %rax, %rsi ; \
movq %rbx, P0 ; \
movq %r8, %rdx ; \
shlq $0x20, %rdx ; \
addq %r8, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r8, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r8 ; \
addq %rbx, %rax ; \
adcq %rdx, %r8 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r9 ; \
sbbq %r8, %r10 ; \
sbbq %rbx, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
movq %rdx, %r8 ; \
sbbq $0x0, %r8 ; \
movq %r9, %rdx ; \
shlq $0x20, %rdx ; \
addq %r9, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r9, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r9 ; \
addq %rbx, %rax ; \
adcq %rdx, %r9 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r10 ; \
sbbq %r9, %r11 ; \
sbbq %rbx, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r8 ; \
movq %rdx, %r9 ; \
sbbq $0x0, %r9 ; \
movq %r10, %rdx ; \
shlq $0x20, %rdx ; \
addq %r10, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r10, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r10 ; \
addq %rbx, %rax ; \
adcq %rdx, %r10 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r11 ; \
sbbq %r10, %r12 ; \
sbbq %rbx, %r13 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
movq %rdx, %r10 ; \
sbbq $0x0, %r10 ; \
movq %r11, %rdx ; \
shlq $0x20, %rdx ; \
addq %r11, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r11, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r11 ; \
addq %rbx, %rax ; \
adcq %rdx, %r11 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r12 ; \
sbbq %r11, %r13 ; \
sbbq %rbx, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
movq %rdx, %r11 ; \
sbbq $0x0, %r11 ; \
movq %r12, %rdx ; \
shlq $0x20, %rdx ; \
addq %r12, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r12, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r12 ; \
addq %rbx, %rax ; \
adcq %rdx, %r12 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r13 ; \
sbbq %r12, %r8 ; \
sbbq %rbx, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %r11 ; \
movq %rdx, %r12 ; \
sbbq $0x0, %r12 ; \
movq %r13, %rdx ; \
shlq $0x20, %rdx ; \
addq %r13, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r13, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r13 ; \
addq %rbx, %rax ; \
adcq %rdx, %r13 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r8 ; \
sbbq %r13, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq $0x0, %r11 ; \
sbbq $0x0, %r12 ; \
movq %rdx, %r13 ; \
sbbq $0x0, %r13 ; \
movq P0, %rbx ; \
addq %r8, %r14 ; \
adcq %r9, %r15 ; \
adcq %r10, %rcx ; \
adcq %r11, %rbx ; \
adcq %r12, %rbp ; \
adcq %r13, %rsi ; \
movl $0x0, %r8d ; \
movq $0xffffffff00000001, %rax ; \
movl $0xffffffff, %r9d ; \
movl $0x1, %r10d ; \
cmovnc %r8, %rax ; \
cmovnc %r8, %r9 ; \
cmovnc %r8, %r10 ; \
addq %rax, %r14 ; \
adcq %r9, %r15 ; \
adcq %r10, %rcx ; \
adcq %r8, %rbx ; \
adcq %r8, %rbp ; \
adcq %r8, %rsi ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %rcx, 0x10+P0 ; \
movq %rbx, 0x18+P0 ; \
movq %rbp, 0x20+P0 ; \
movq %rsi, 0x28+P0
// Corresponds exactly to bignum_sub_p384
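// The top-level borrow from x - y is turned into a mask, and p_384 is added
// back when the subtraction underflowed; since p_384 = 2^384 - r with
// r = 2^128 + 2^96 - 2^32 + 1, this is done as a masked subtraction of the
// three nonzero words of r.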
#define sub_p384(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
sbbq %rcx, %rcx ; \
movl $0xffffffff, %esi ; \
andq %rsi, %rcx ; \
xorq %rsi, %rsi ; \
subq %rcx, %rsi ; \
subq %rsi, %rax ; \
movq %rax, P0 ; \
sbbq %rcx, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq %rax, %rax ; \
andq %rsi, %rcx ; \
negq %rax; \
sbbq %rcx, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0
// Additional macros to help with final multiplexing
#define load6(r0,r1,r2,r3,r4,r5,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3 ; \
movq 32+P, r4 ; \
movq 40+P, r5
#define store6(P,r0,r1,r2,r3,r4,r5) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P ; \
movq r4, 32+P ; \
        movq    r5, 40+P
#define czload6(r0,r1,r2,r3,r4,r5,P) \
cmovzq P, r0 ; \
cmovzq 8+P, r1 ; \
cmovzq 16+P, r2 ; \
cmovzq 24+P, r3 ; \
cmovzq 32+P, r4 ; \
cmovzq 40+P, r5
#define muxload6(r0,r1,r2,r3,r4,r5,P0,P1,P2) \
movq P0, r0 ; \
cmovbq P1, r0 ; \
cmovnbe P2, r0 ; \
movq 8+P0, r1 ; \
cmovbq 8+P1, r1 ; \
cmovnbe 8+P2, r1 ; \
movq 16+P0, r2 ; \
cmovbq 16+P1, r2 ; \
cmovnbe 16+P2, r2 ; \
movq 24+P0, r3 ; \
cmovbq 24+P1, r3 ; \
cmovnbe 24+P2, r3 ; \
movq 32+P0, r4 ; \
cmovbq 32+P1, r4 ; \
cmovnbe 32+P2, r4 ; \
movq 40+P0, r5 ; \
cmovbq 40+P1, r5 ; \
cmovnbe 40+P2, r5
S2N_BN_SYMBOL(p384_montjadd):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input arguments in non-volatile places on the stack
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rsi, input_x
movq %rdx, input_y
// Main code, just a sequence of basic field operations
// 12 * multiply + 4 * square + 7 * subtract
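// In outline (a sketch of the formulas computed below; all values are field
// elements in Montgomery form, and the names match the temporaries used):
//
//   z1sq = z1^2            z2sq = z2^2
//   x1a = x1 * z2sq        x2a = x2 * z1sq
//   y1a = y1 * z2^3        y2a = y2 * z1^3
//   xd = x2a - x1a         yd = y2a - y1a
//   zz = xd^2              ww = yd^2
//   zzx1 = zz * x1a        zzx2 = zz * x2a
//   resx = ww - zzx1 - zzx2
//   resy = yd * (zzx1 - resx) - y1a * (zzx2 - zzx1)
//   resz = xd * z1 * z2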
amontsqr_p384(z1sq,z_1)
movq input_y, %rsi
amontsqr_p384(z2sq,z_2_alt)
movq input_x, %rsi
movq input_y, %rcx
montmul_p384(y1a,z_2,y_1)
movq input_x, %rsi
movq input_y, %rcx
montmul_p384(y2a,z_1,y_2)
movq input_y, %rcx
montmul_p384(x2a,z1sq,x_2)
movq input_x, %rsi
montmul_p384(x1a,z2sq,x_1)
montmul_p384(y2a,z1sq,y2a)
montmul_p384(y1a,z2sq,y1a)
sub_p384(xd,x2a,x1a)
sub_p384(yd,y2a,y1a)
amontsqr_p384(zz,xd)
montsqr_p384(ww,yd)
montmul_p384(zzx1,zz,x1a)
montmul_p384(zzx2,zz,x2a)
sub_p384(resx,ww,zzx1)
sub_p384(t1,zzx2,zzx1)
movq input_x, %rsi
montmul_p384(xd,xd,z_1)
sub_p384(resx,resx,zzx2)
sub_p384(t2,zzx1,resx)
montmul_p384(t1,t1,y1a)
movq input_y, %rcx
montmul_p384(resz,xd,z_2)
montmul_p384(t2,yd,t2)
sub_p384(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0)
// and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0
// and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0)
// Multiplex the z outputs accordingly and re-store in resz
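// In C-like terms, the selection implemented below is (illustrative sketch):
//
//   if (z1 == 0 && z2 != 0)      z_out = z2;    /* P1 = 0: result is P2   */
//   else if (z1 != 0 && z2 == 0) z_out = z1;    /* P2 = 0: result is P1   */
//   else                         z_out = resz;  /* generic (or both zero) */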
movq input_y, %rcx
load6(%r8,%r9,%r10,%r11,%rbx,%rbp,z_2)
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rbx, %rax
orq %rbp, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
movq input_x, %rsi
load6(%r12,%r13,%r14,%r15,%rdx,%rcx,z_1)
cmovzq %r12, %r8
cmovzq %r13, %r9
cmovzq %r14, %r10
cmovzq %r15, %r11
cmovzq %rdx, %rbx
cmovzq %rcx, %rbp
orq %r13, %r12
orq %r15, %r14
orq %rcx, %rdx
orq %r14, %r12
orq %r12, %rdx
negq %rdx
sbbq %rdx, %rdx
cmpq %rdx, %rax
czload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz)
store6(resz,%r8,%r9,%r10,%r11,%rbx,%rbp)
// Multiplex the x and y outputs too, keeping the results in registers
movq input_y, %rcx
movq input_x, %rsi
muxload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_1,x_2)
muxload6(%r12,%r13,%r14,%r15,%rdx,%rax,resy,y_1,y_2)
// Finally store back the multiplexed values
store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp)
load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz)
store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rax)
store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p384_montjadd)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
wlsfx/bnbb | 43,935 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjadd_alt(uint64_t p3[static 18],
// const uint64_t p1[static 18],
// const uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rcx = p2,
// which needs to be set up explicitly before use.
// The %rdi value never changes, however.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rcx)
#define y_2 NUMSIZE(%rcx)
#define z_2 (2*NUMSIZE)(%rcx)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// In one place it's convenient to use another register
// since the squaring function overwrites %rcx
#define z_2_alt (2*NUMSIZE)(%rsi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define x1a (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define z2sq (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define y1a (NUMSIZE*6)(%rsp)
// Temporaries for the actual input pointers
#define input_x (NUMSIZE*7)(%rsp)
#define input_y (NUMSIZE*7+8)(%rsp)
#define NSPACE 352
// Corresponds exactly to bignum_montmul_p384_alt
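// (The "_alt" suffix here, as elsewhere in s2n-bignum, marks the variant
// built from the classic widening mulq instruction rather than the
// BMI2/ADX mulx/adcx/adox pattern used in the non-alt code above, so it
// also runs on CPUs without those extensions.)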
#define montmul_p384(P0,P1,P2) \
movq P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
xorl %r10d, %r10d ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
xorl %r11d, %r11d ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
xorl %r12d, %r12d ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
xorl %r13d, %r13d ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
xorl %r14d, %r14d ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
xorl %r15d, %r15d ; \
movq %r8, %rbx ; \
shlq $0x20, %rbx ; \
addq %r8, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r8 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r8, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbp, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x8+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %r8, %r8 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r8, %r8 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %r8, %r8 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r8, %r8 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r8, %r8 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r8, %r8 ; \
negq %r8; \
movq %r9, %rbx ; \
shlq $0x20, %rbx ; \
addq %r9, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r9 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r9, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbp, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r9, %r9 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %r9, %r9 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r9, %r9 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r9, %r9 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r9, %r9 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r9, %r9 ; \
negq %r9; \
movq %r10, %rbx ; \
shlq $0x20, %rbx ; \
addq %r10, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r10 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r10, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r11 ; \
sbbq %rdx, %r12 ; \
sbbq %rbp, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %r10, %r10 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r10, %r10 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r10, %r10 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r10, %r10 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r10, %r10 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %r10, %r10 ; \
negq %r10; \
movq %r11, %rbx ; \
shlq $0x20, %rbx ; \
addq %r11, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r11 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r11, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r12 ; \
sbbq %rdx, %r13 ; \
sbbq %rbp, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r11, %r11 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r11, %r11 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r11, %r11 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r11, %r11 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %r11, %r11 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %r11, %r11 ; \
negq %r11; \
movq %r12, %rbx ; \
shlq $0x20, %rbx ; \
addq %r12, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r12 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r12, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r13 ; \
sbbq %rdx, %r14 ; \
sbbq %rbp, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r12, %r12 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r12, %r12 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %r12, %r12 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %r12, %r12 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r12, %r12 ; \
negq %r12; \
movq %r13, %rbx ; \
shlq $0x20, %rbx ; \
addq %r13, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r13 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r13, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r14 ; \
sbbq %rdx, %r15 ; \
sbbq %rbp, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r11 ; \
adcq $0x0, %r12 ; \
xorl %edx, %edx ; \
xorl %ebp, %ebp ; \
xorl %r13d, %r13d ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %ebx ; \
adcq %r15, %rbx ; \
movl $0x1, %ecx ; \
adcq %r8, %rcx ; \
adcq %r9, %rdx ; \
adcq %r10, %rbp ; \
adcq %r11, %r13 ; \
adcq $0x0, %r12 ; \
cmovneq %rax, %r14 ; \
cmovneq %rbx, %r15 ; \
cmovneq %rcx, %r8 ; \
cmovneq %rdx, %r9 ; \
cmovneq %rbp, %r10 ; \
cmovneq %r13, %r11 ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %r8, 0x10+P0 ; \
movq %r9, 0x18+P0 ; \
movq %r10, 0x20+P0 ; \
movq %r11, 0x28+P0
// Corresponds exactly to bignum_montsqr_p384_alt
#define montsqr_p384(P0,P1) \
movq P1, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
movq %rax, %r9 ; \
movq %rdx, %r10 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
movq %rax, %r11 ; \
movq %rdx, %r12 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
movq %rax, %r13 ; \
movq %rdx, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P1; \
movq %rax, %r15 ; \
movq %rdx, %rcx ; \
movq 0x10+P1, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbp, %rbp ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rbp, %rbp ; \
movq 0x8+P1, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %rcx ; \
movq 0x20+P1, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x10+P1, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %rcx ; \
sbbq %rbp, %rbp ; \
xorl %ebx, %ebx ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P1; \
subq %rbp, %rdx ; \
xorl %ebp, %ebp ; \
addq %rax, %rcx ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
xorl %r8d, %r8d ; \
addq %r9, %r9 ; \
adcq %r10, %r10 ; \
adcq %r11, %r11 ; \
adcq %r12, %r12 ; \
adcq %r13, %r13 ; \
adcq %r14, %r14 ; \
adcq %r15, %r15 ; \
adcq %rcx, %rcx ; \
adcq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcl %r8d, %r8d ; \
movq P1, %rax ; \
mulq %rax; \
movq %r8, P0 ; \
movq %rax, %r8 ; \
movq 0x8+P1, %rax ; \
movq %rbp, 0x8+P0 ; \
addq %rdx, %r9 ; \
sbbq %rbp, %rbp ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbp, %rbp ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %rcx ; \
adcq %rdx, %rbx ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq 0x8+P0, %rax ; \
adcq P0, %rdx ; \
movq %rax, %rbp ; \
movq %rdx, %rsi ; \
movq %rbx, P0 ; \
movq %r8, %rbx ; \
shlq $0x20, %rbx ; \
addq %r8, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r8 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r8, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rax, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
movq %rbx, %r8 ; \
sbbq $0x0, %r8 ; \
movq %r9, %rbx ; \
shlq $0x20, %rbx ; \
addq %r9, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r9 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r9 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r9, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rax, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r8 ; \
movq %rbx, %r9 ; \
sbbq $0x0, %r9 ; \
movq %r10, %rbx ; \
shlq $0x20, %rbx ; \
addq %r10, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r10 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r10, %r11 ; \
sbbq %rdx, %r12 ; \
sbbq %rax, %r13 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
movq %rbx, %r10 ; \
sbbq $0x0, %r10 ; \
movq %r11, %rbx ; \
shlq $0x20, %rbx ; \
addq %r11, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r11 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r11 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r11, %r12 ; \
sbbq %rdx, %r13 ; \
sbbq %rax, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
movq %rbx, %r11 ; \
sbbq $0x0, %r11 ; \
movq %r12, %rbx ; \
shlq $0x20, %rbx ; \
addq %r12, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r12 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r12 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r12, %r13 ; \
sbbq %rdx, %r8 ; \
sbbq %rax, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %r11 ; \
movq %rbx, %r12 ; \
sbbq $0x0, %r12 ; \
movq %r13, %rbx ; \
shlq $0x20, %rbx ; \
addq %r13, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r13 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r13 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r13, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rax, %r10 ; \
sbbq $0x0, %r11 ; \
sbbq $0x0, %r12 ; \
movq %rbx, %r13 ; \
sbbq $0x0, %r13 ; \
movq P0, %rbx ; \
addq %r8, %r14 ; \
adcq %r9, %r15 ; \
adcq %r10, %rcx ; \
adcq %r11, %rbx ; \
adcq %r12, %rbp ; \
adcq %r13, %rsi ; \
movl $0x0, %r8d ; \
adcq %r8, %r8 ; \
xorq %r11, %r11 ; \
xorq %r12, %r12 ; \
xorq %r13, %r13 ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %r9d ; \
adcq %r15, %r9 ; \
movl $0x1, %r10d ; \
adcq %rcx, %r10 ; \
adcq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq %rsi, %r13 ; \
adcq $0x0, %r8 ; \
cmovneq %rax, %r14 ; \
cmovneq %r9, %r15 ; \
cmovneq %r10, %rcx ; \
cmovneq %r11, %rbx ; \
cmovneq %r12, %rbp ; \
cmovneq %r13, %rsi ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %rcx, 0x10+P0 ; \
movq %rbx, 0x18+P0 ; \
movq %rbp, 0x20+P0 ; \
movq %rsi, 0x28+P0
// Corresponds exactly to bignum_sub_p384
#define sub_p384(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
sbbq %rcx, %rcx ; \
movl $0xffffffff, %esi ; \
andq %rsi, %rcx ; \
xorq %rsi, %rsi ; \
subq %rcx, %rsi ; \
subq %rsi, %rax ; \
movq %rax, P0 ; \
sbbq %rcx, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq %rax, %rax ; \
andq %rsi, %rcx ; \
negq %rax; \
sbbq %rcx, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0
// Additional macros to help with final multiplexing
#define load6(r0,r1,r2,r3,r4,r5,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3 ; \
movq 32+P, r4 ; \
movq 40+P, r5
#define store6(P,r0,r1,r2,r3,r4,r5) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P ; \
movq r4, 32+P ; \
        movq    r5, 40+P
#define czload6(r0,r1,r2,r3,r4,r5,P) \
cmovzq P, r0 ; \
cmovzq 8+P, r1 ; \
cmovzq 16+P, r2 ; \
cmovzq 24+P, r3 ; \
cmovzq 32+P, r4 ; \
cmovzq 40+P, r5
#define muxload6(r0,r1,r2,r3,r4,r5,P0,P1,P2) \
movq P0, r0 ; \
cmovbq P1, r0 ; \
cmovnbe P2, r0 ; \
movq 8+P0, r1 ; \
cmovbq 8+P1, r1 ; \
cmovnbe 8+P2, r1 ; \
movq 16+P0, r2 ; \
cmovbq 16+P1, r2 ; \
cmovnbe 16+P2, r2 ; \
movq 24+P0, r3 ; \
cmovbq 24+P1, r3 ; \
cmovnbe 24+P2, r3 ; \
movq 32+P0, r4 ; \
cmovbq 32+P1, r4 ; \
cmovnbe 32+P2, r4 ; \
movq 40+P0, r5 ; \
cmovbq 40+P1, r5 ; \
cmovnbe 40+P2, r5
S2N_BN_SYMBOL(p384_montjadd_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input arguments in non-volatile places on the stack
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rsi, input_x
movq %rdx, input_y
// Main code, just a sequence of basic field operations
// 12 * multiply + 4 * square + 7 * subtract
montsqr_p384(z1sq,z_1)
movq input_y, %rsi
montsqr_p384(z2sq,z_2_alt)
movq input_x, %rsi
movq input_y, %rcx
montmul_p384(y1a,z_2,y_1)
movq input_x, %rsi
movq input_y, %rcx
montmul_p384(y2a,z_1,y_2)
movq input_y, %rcx
montmul_p384(x2a,z1sq,x_2)
movq input_x, %rsi
montmul_p384(x1a,z2sq,x_1)
montmul_p384(y2a,z1sq,y2a)
montmul_p384(y1a,z2sq,y1a)
sub_p384(xd,x2a,x1a)
sub_p384(yd,y2a,y1a)
montsqr_p384(zz,xd)
montsqr_p384(ww,yd)
montmul_p384(zzx1,zz,x1a)
montmul_p384(zzx2,zz,x2a)
sub_p384(resx,ww,zzx1)
sub_p384(t1,zzx2,zzx1)
movq input_x, %rsi
montmul_p384(xd,xd,z_1)
sub_p384(resx,resx,zzx2)
sub_p384(t2,zzx1,resx)
montmul_p384(t1,t1,y1a)
movq input_y, %rcx
montmul_p384(resz,xd,z_2)
montmul_p384(t2,yd,t2)
sub_p384(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0)
// and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0
// and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0)
// Multiplex the z outputs accordingly and re-store in resz
movq input_y, %rcx
load6(%r8,%r9,%r10,%r11,%rbx,%rbp,z_2)
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rbx, %rax
orq %rbp, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
movq input_x, %rsi
load6(%r12,%r13,%r14,%r15,%rdx,%rcx,z_1)
cmovzq %r12, %r8
cmovzq %r13, %r9
cmovzq %r14, %r10
cmovzq %r15, %r11
cmovzq %rdx, %rbx
cmovzq %rcx, %rbp
orq %r13, %r12
orq %r15, %r14
orq %rcx, %rdx
orq %r14, %r12
orq %r12, %rdx
negq %rdx
sbbq %rdx, %rdx
cmpq %rdx, %rax
czload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz)
store6(resz,%r8,%r9,%r10,%r11,%rbx,%rbp)
// Multiplex the x and y outputs too, keeping the results in registers
movq input_y, %rcx
movq input_x, %rsi
muxload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_1,x_2)
muxload6(%r12,%r13,%r14,%r15,%rdx,%rax,resy,y_1,y_2)
// Finally store back the multiplexed values
store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp)
load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz)
store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rax)
store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p384_montjadd_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
wlsfx/bnbb | 3,645 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_add_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Add modulo p_384, z := (x + y) mod p_384, assuming x and y reduced
// Inputs x[6], y[6]; output z[6]
//
// extern void bignum_add_p384(uint64_t z[static 6], const uint64_t x[static 6],
// const uint64_t y[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_p384)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_p384)
.text
#define z %rdi
#define x %rsi
#define y %rdx
#define d0 %rax
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11
// Re-use the input pointers as temporaries once we're done
#define a %rsi
#define c %rdx
#define ashort %esi
#define cshort %edx
S2N_BN_SYMBOL(bignum_add_p384):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Add the inputs as 2^384 * c + [d5;d4;d3;d2;d1;d0] = x + y
// This could be combined with the next block using ADCX and ADOX.
movq (x), d0
addq (y), d0
movq 8(x), d1
adcq 8(y), d1
movq 16(x), d2
adcq 16(y), d2
movq 24(x), d3
adcq 24(y), d3
movq 32(x), d4
adcq 32(y), d4
movq 40(x), d5
adcq 40(y), d5
movl $0, cshort
adcq c, c
// Now subtract p_384 from 2^384 * c + [d5;d4;d3;d2;d1;d0] to get x + y - p_384
// This is actually done by *adding* the 7-word negation r_384 = 2^448 - p_384
// where r_384 = [-1; 0; 0; 0; 1; 0x00000000ffffffff; 0xffffffff00000001]
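// For reference only, the same operation as a portable C sketch (the name
// add_p384_ref and the use of unsigned __int128 are illustrative and not
// part of this code base; x and y are assumed < p_384 as in the contract
// above):
//
//   #include <stdint.h>
//
//   void add_p384_ref(uint64_t z[6], const uint64_t x[6], const uint64_t y[6])
//   {
//     static const uint64_t p[6] =
//       { 0x00000000ffffffffULL, 0xffffffff00000000ULL,
//         0xfffffffffffffffeULL, 0xffffffffffffffffULL,
//         0xffffffffffffffffULL, 0xffffffffffffffffULL };
//     uint64_t s[6], d[6], c = 0, b = 0;
//     for (int i = 0; i < 6; i++)                  // s = x + y with carry c
//       { unsigned __int128 t = (unsigned __int128) x[i] + y[i] + c;
//         s[i] = (uint64_t) t; c = (uint64_t) (t >> 64); }
//     for (int i = 0; i < 6; i++)                  // d = s - p with borrow b
//       { unsigned __int128 t = (unsigned __int128) s[i] - p[i] - b;
//         d[i] = (uint64_t) t; b = (uint64_t) ((t >> 64) != 0); }
//     uint64_t ge = c | (b ^ 1);                   // x + y >= p_384 ?
//     for (int i = 0; i < 6; i++) z[i] = ge ? d[i] : s[i];
//   }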
movq $0xffffffff00000001, a
addq a, d0
movl $0x00000000ffffffff, ashort
adcq a, d1
adcq $1, d2
adcq $0, d3
adcq $0, d4
adcq $0, d5
adcq $-1, c
// Since by hypothesis x < p_384 we know x + y - p_384 < 2^384, so the top
// carry c actually gives us a bitmask for x + y - p_384 < 0, which we
// now use to make r' = mask * (2^384 - p_384) for a compensating subtraction.
// We don't quite have enough ABI-modifiable registers to create all three
// nonzero digits of r while maintaining d0..d5, but make the first two now.
andq a, c // c = masked 0x00000000ffffffff
xorq a, a
subq c, a // a = masked 0xffffffff00000001
// Do the first two digits of addition and writeback
subq a, d0
movq d0, (z)
sbbq c, d1
movq d1, 8(z)
// Preserve the carry chain while creating the extra masked digit since
// the logical operation will clear CF
sbbq d0, d0
andq a, c // c = masked 0x0000000000000001
negq d0
// Do the rest of the addition and writeback
sbbq c, d2
movq d2, 16(z)
sbbq $0, d3
movq d3, 24(z)
sbbq $0, d4
movq d4, 32(z)
sbbq $0, d5
movq d5, 40(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_add_p384)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb | 4,543 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_cmul_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_384, z := (c * x) mod p_384, assuming
// x reduced
// Inputs c, x[6]; output z[6]
//
// extern void bignum_cmul_p384_alt(uint64_t z[static 6], uint64_t c,
// const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p384_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p384_alt)
.text
#define z %rdi
// Temporarily moved here for initial multiply
#define x %rcx
// Likewise this is thrown away after initial multiply
#define m %rsi
#define a %rax
#define c %rcx
#define d %rdx
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
#define d4 %r12
#define d5 %rsi
// Multiplier again for second stage
#define q %rcx
#define ashort %eax
#define dshort %edx
#define cshort %ecx
#define qshort %ecx
S2N_BN_SYMBOL(bignum_cmul_p384_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// We seem to need (just!) one extra register, which we need to save and restore
CFI_PUSH(%r12)
// Shuffle inputs (since we want %rdx for the high parts of products)
movq %rdx, x
// Multiply, accumulating the result as 2^384 * h + [d5;d4;d3;d2;d1;d0]
// but actually immediately producing q = h + 1, our quotient approximation,
// by adding 1 to it. Note that by hypothesis x is reduced mod p_384, so our
// product is <= (2^64 - 1) * (p_384 - 1) and hence h <= 2^64 - 2, meaning
// there is no danger this addition of 1 could wrap.
movq (x), a
mulq m
movq a, d0
movq d, d1
movq 8(x), a
mulq m
xorq d2, d2
addq a, d1
adcq d, d2
movq 16(x), a
mulq m
xorq d3, d3
addq a, d2
adcq d, d3
movq 24(x), a
mulq m
xorq d4, d4
addq a, d3
adcq d, d4
movq 32(x), a
mulq m
addq a, d4
adcq $0, d
movq m, a
movq d, d5
mulq 40(x)
movl $1, qshort
addq a, d5
adcq d, q
// It's easy to see -p_384 <= z - q * p_384 < p_384, so we just need to
// subtract q * p_384 and then correct if that is negative by adding p_384.
//
// Write p_384 = 2^384 - r where r = 2^128 + 2^96 - 2^32 + 1
//
// We want z - q * (2^384 - r)
// = (2^384 * h + l) - q * (2^384 - r)
// = 2^384 * (h - q) + (l + q * r)
// = 2^384 * (-1) + (l + q * r)
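// (A sketch of why the claimed bounds hold: writing z = 2^384 * h + l and
// q = h + 1 with r = 2^384 - p_384 < 2^129, we have
// z - q * p_384 = l + q * r - 2^384. Since q >= 1 this is
// >= r - 2^384 = -p_384, and since l < 2^384 and
// q * r < 2^64 * 2^129 = 2^193 < p_384, it is also < p_384.)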
movq $0xffffffff00000001, a
mulq q
addq a, d0
adcq d, d1
adcq q, d2
movq q, a
sbbq c, c
movl $0x00000000ffffffff, dshort
negq c
mulq d
addq a, d1
adcq d, d2
adcq c, d3
adcq $0, d4
adcq $0, d5
sbbq c, c
notq c
// The net c value is now the top word of the 7-word answer, hence will
// be -1 if we need a corrective addition, 0 otherwise, usable as a mask.
// Now use that mask for a masked addition of p_384, which again is in
// fact done by a masked subtraction of 2^384 - p_384, so that we only
// have three nonzero digits and so can avoid using another register.
movl $0x00000000ffffffff, dshort
xorq a, a
andq c, d
subq d, a
andq $1, c
subq a, d0
movq d0, (z)
sbbq d, d1
movq d1, 8(z)
sbbq c, d2
movq d2, 16(z)
sbbq $0, d3
movq d3, 24(z)
sbbq $0, d4
movq d4, 32(z)
sbbq $0, d5
movq d5, 40(z)
// Return
CFI_POP(%r12)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p384_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb | 10,124 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_tomont_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^384 * x) mod p_384
// Input x[6]; output z[6]
//
// extern void bignum_tomont_p384_alt(uint64_t z[static 6],
// const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p384_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p384_alt)
.text
#define z %rdi
#define x %rsi
// Some temp registers for the last correction stage
#define d %rax
#define u %rdx
#define v %rcx
#define w %rsi
#define vshort %ecx
#define wshort %esi
// Add %rbx * m into a register-pair (high,low) maintaining consistent
// carry-catching with carry (negated, as bitmask) and using %rax and %rdx
// as temporaries
#define mulpadd(carry,high,low,m) \
movq m, %rax ; \
mulq %rbx; \
subq carry, %rdx ; \
addq %rax, low ; \
adcq %rdx, high ; \
sbbq carry, carry
// Initial version assuming no carry-in
#define mulpadi(carry,high,low,m) \
movq m, %rax ; \
mulq %rbx; \
addq %rax, low ; \
adcq %rdx, high ; \
sbbq carry, carry
// End version not catching the top carry-out
#define mulpade(carry,high,low,m) \
movq m, %rax ; \
mulq %rbx; \
subq carry, %rdx ; \
addq %rax, low ; \
adcq %rdx, high
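// (Convention used by these helpers: "sbbq carry, carry" records the carry
// out of the previous addition, negated, as a 0 or -1 bitmask, and the next
// step's "subq carry, %rdx" folds it back into the high product word; this
// cannot overflow because the high word of a 64x64-bit product is at most
// 2^64 - 2.)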
// Core one-step Montgomery reduction macro. Takes input in
// [d7;d6;d5;d4;d3;d2;d1;d0] and returns result in [d7;d6;d5;d4;d3;d2;d1],
// adding to the existing contents, re-using d0 as a temporary internally
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
//
// montredc(d7,d6,d5,d4,d3,d2,d1,d0)
//
// This particular variant, with its mix of addition and subtraction
// at the top, is not intended to maintain a coherent carry or borrow out.
// It is assumed the final result fits in [d7;d6;d5;d4;d3;d2;d1],
// which is always the case here since the top word is in fact always in {0,1}.
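// (Why this multiplier works: p_384 == 2^32 - 1 (mod 2^64) and
// (2^32 - 1) * (2^32 + 1) = 2^64 - 1 == -1 (mod 2^64), so 2^32 + 1 is the
// negated inverse of p_384 modulo 2^64. With w = (2^32 + 1) * d0 mod 2^64
// we therefore get d0 + w * p_384 == 0 (mod 2^64), making the bottom word
// cancel as claimed.)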
#define montredc(d7,d6,d5,d4,d3,d2,d1,d0) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
movq d0, %rbx ; \
shlq $32, %rbx ; \
addq d0, %rbx ; \
/* Construct [%rcx;%rdx;%rax;-] = (2^384 - p_384) * w */ \
/* We know the lowest word will cancel so we can re-use d0 as a temp */ \
xorl %ecx, %ecx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, d0 ; \
movq $0x00000000ffffffff, %rax ; \
mulq %rbx; \
addq d0, %rax ; \
adcq %rbx, %rdx ; \
adcl %ecx, %ecx ; \
/* Now subtract that and add 2^384 * w */ \
subq %rax, d1 ; \
sbbq %rdx, d2 ; \
sbbq %rcx, d3 ; \
sbbq $0, d4 ; \
sbbq $0, d5 ; \
sbbq $0, %rbx ; \
addq %rbx, d6 ; \
adcq $0, d7
S2N_BN_SYMBOL(bignum_tomont_p384_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// We are essentially just doing a Montgomery multiplication of x and the
// precomputed constant y = 2^768 mod p, so the code is almost the same
// modulo a few registers and the change from loading y[i] to using constants,
// plus the easy digits y[4] = 1 and y[5] = 0 being treated specially.
// Because there is no y pointer to keep, we use one register less.
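// (Recall that Montgomery multiplication modulo p_384 computes
// a * b * 2^-384 mod p_384, so multiplying x by the constant
// y = 2^768 mod p_384 yields x * 2^768 * 2^-384 = x * 2^384 mod p_384,
// which is exactly the Montgomery form of x.)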
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Do row 0 computation, which is a bit different:
// set up initial window [%r14,%r13,%r12,%r11,%r10,%r9,%r8] = y[0] * x
// Unlike later, we only need a single carry chain
movq $0xfffffffe00000001, %rbx
movq (x), %rax
mulq %rbx
movq %rax, %r8
movq %rdx, %r9
movq 8(x), %rax
mulq %rbx
xorl %r10d, %r10d
addq %rax, %r9
adcq %rdx, %r10
movq 16(x), %rax
mulq %rbx
xorl %r11d, %r11d
addq %rax, %r10
adcq %rdx, %r11
movq 24(x), %rax
mulq %rbx
xorl %r12d, %r12d
addq %rax, %r11
adcq %rdx, %r12
movq 32(x), %rax
mulq %rbx
xorl %r13d, %r13d
addq %rax, %r12
adcq %rdx, %r13
movq 40(x), %rax
mulq %rbx
xorl %r14d, %r14d
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
// Montgomery reduce the zeroth window
montredc(%r15, %r14,%r13,%r12,%r11,%r10,%r9,%r8)
// Add row 1
movq $0x0000000200000000, %rbx
mulpadi(%r8,%r10,%r9,(x))
mulpadd(%r8,%r11,%r10,8(x))
mulpadd(%r8,%r12,%r11,16(x))
mulpadd(%r8,%r13,%r12,24(x))
mulpadd(%r8,%r14,%r13,32(x))
mulpadd(%r8,%r15,%r14,40(x))
negq %r8
// Montgomery reduce window 1
montredc(%r8, %r15,%r14,%r13,%r12,%r11,%r10,%r9)
// Add row 2
movq $0xfffffffe00000000, %rbx
mulpadi(%r9,%r11,%r10,(x))
mulpadd(%r9,%r12,%r11,8(x))
mulpadd(%r9,%r13,%r12,16(x))
mulpadd(%r9,%r14,%r13,24(x))
mulpadd(%r9,%r15,%r14,32(x))
mulpadd(%r9,%r8,%r15,40(x))
negq %r9
// Montgomery reduce window 2
montredc(%r9, %r8,%r15,%r14,%r13,%r12,%r11,%r10)
// Add row 3
movq $0x0000000200000000, %rbx
mulpadi(%r10,%r12,%r11,(x))
mulpadd(%r10,%r13,%r12,8(x))
mulpadd(%r10,%r14,%r13,16(x))
mulpadd(%r10,%r15,%r14,24(x))
mulpadd(%r10,%r8,%r15,32(x))
mulpadd(%r10,%r9,%r8,40(x))
negq %r10
// Montgomery reduce window 3
montredc(%r10, %r9,%r8,%r15,%r14,%r13,%r12,%r11)
// Add row 4. The multiplier y[4] = 1, so we just add x to the window
// while extending it with one more digit, initialized to the final carry
xorq %r11, %r11
addq (x), %r12
adcq 8(x), %r13
adcq 16(x), %r14
adcq 24(x), %r15
adcq 32(x), %r8
adcq 40(x), %r9
adcq %r11, %r10
adcq %r11, %r11
// Montgomery reduce window 4
montredc(%r11, %r10,%r9,%r8,%r15,%r14,%r13,%r12)
// Add row 5. The multiplier y[5] = 0, so this is trivial: all we do is
// bring down another zero digit into the window.
xorq %r12, %r12
// Montgomery reduce window 5
montredc(%r12, %r11,%r10,%r9,%r8,%r15,%r14,%r13)
// We now have a pre-reduced 7-word form [%r12;%r11;%r10;%r9;%r8;%r15;%r14]
// We know, writing B = 2^{6*64}, that the full implicit result satisfies
// B^2 * c <= z + (B - 1) * p < B * p + (B - 1) * p < 2 * B * p,
// so the top half is certainly < 2 * p. If c = 1 already, we know
// subtracting p will give the reduced result. But now we do a
// comparison to catch cases where the residue is >= p.
// First set [0;0;0;w;v;u] = 2^384 - p_384
movq $0xffffffff00000001, u
movl $0x00000000ffffffff, vshort
movl $0x0000000000000001, wshort
// Let dd = [%r11;%r10;%r9;%r8;%r15;%r14] be the topless 6-word intermediate result.
// Set CF if the addition dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384.
movq %r14, d
addq u, d
movq %r15, d
adcq v, d
movq %r8, d
adcq w, d
movq %r9, d
adcq $0, d
movq %r10, d
adcq $0, d
movq %r11, d
adcq $0, d
// Now just add this new carry into the existing %r12. It's easy to see they
// can't both be 1 by our range assumptions, so this gives us a {0,1} flag
adcq $0, %r12
// Now convert it into a bitmask
negq %r12
// Masked addition of 2^384 - p_384, hence subtraction of p_384
andq %r12, u
andq %r12, v
andq %r12, w
addq u, %r14
adcq v, %r15
adcq w, %r8
adcq $0, %r9
adcq $0, %r10
adcq $0, %r11
// Write back the result
movq %r14, (z)
movq %r15, 8(z)
movq %r8, 16(z)
movq %r9, 24(z)
movq %r10, 32(z)
movq %r11, 40(z)
// Restore registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p384_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb | 1,573 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_nonzero_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// 384-bit nonzeroness test, returning 1 if x is nonzero, 0 if x is zero
// Input x[6]; output function return
//
// extern uint64_t bignum_nonzero_6(const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = x, returns RAX
// Microsoft x64 ABI: RCX = x, returns RAX
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_nonzero_6)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_nonzero_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_nonzero_6)
.text
#define x %rdi
#define a %rax
#define d %rdx
#define dshort %edx
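// For reference only, a C sketch of the same computation (illustrative and
// not part of this code base; assumes <stdint.h>):
//
//   uint64_t bignum_nonzero_6_ref(const uint64_t x[6])
//   {
//     uint64_t a = x[0] | x[1] | x[2] | x[3] | x[4] | x[5];
//     return a != 0 ? 1 : 0;
//   }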
S2N_BN_SYMBOL(bignum_nonzero_6):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
#endif
// Generate a = an OR of all the words in the bignum
movq (x), a
movq 8(x), d
orq 16(x), a
orq 24(x), d
orq 32(x), a
orq 40(x), d
orq d, a
// Set a standard C condition based on whether a is nonzero
movl $1, dshort
cmovnzq d, a
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_nonzero_6)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb | 4,680 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_demont_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^384) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
// extern void bignum_demont_p384_alt(uint64_t z[static 6],
// const uint64_t x[static 6]);
//
// This assumes the input is < p_384 for correctness. If this is not the case,
// use the variant "bignum_deamont_p384" instead.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p384_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p384_alt)
.text
#define z %rdi
#define x %rsi
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing [d5;d4;d3;d2;d1] and re-using d0 as a
// temporary internally, as well as %rax, %rcx and %rdx.
// It is OK for d6 and d0 to be the same register (they often are)
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
//
// montreds(d6,d5,d4,d3,d2,d1,d0)
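// (Equivalently: each montreds step divides by 2^64 modulo p_384, so six of
// them compute x * 2^-384 mod p_384; the whole function is montmul(x, 1)
// with the trivial multiplications by 1 elided.)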
#define montreds(d6,d5,d4,d3,d2,d1,d0) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
movq d0, %rcx ; \
shlq $32, %rcx ; \
addq d0, %rcx ; \
/* Construct [%rax;%rdx;d0;-] = (2^384 - p_384) * w */ \
/* We know the lowest word will cancel so we can re-use d0 */ \
/* and %rcx as temps. */ \
movq $0xffffffff00000001, %rax ; \
mulq %rcx; \
movq %rdx, d0 ; \
movq $0x00000000ffffffff, %rax ; \
mulq %rcx; \
addq %rax, d0 ; \
movl $0, %eax ; \
adcq %rcx, %rdx ; \
adcl %eax, %eax ; \
/* Now subtract that and add 2^384 * w */ \
subq d0, d1 ; \
sbbq %rdx, d2 ; \
sbbq %rax, d3 ; \
sbbq $0, d4 ; \
sbbq $0, d5 ; \
movq %rcx, d6 ; \
sbbq $0, d6
S2N_BN_SYMBOL(bignum_demont_p384_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%r12)
CFI_PUSH(%r13)
// Set up an initial window [%r13,%r12,%r11,%r10,%r9,%r8] = x
movq (x), %r8
movq 8(x), %r9
movq 16(x), %r10
movq 24(x), %r11
movq 32(x), %r12
movq 40(x), %r13
// Montgomery reduce window 0
montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8)
// Montgomery reduce window 1
montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9)
// Montgomery reduce window 2
montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10)
// Montgomery reduce window 3
montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11)
// Montgomery reduce window 4
montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12)
// Montgomery reduce window 5
montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13)
// Write back the result
movq %r8, (z)
movq %r9, 8(z)
movq %r10, 16(z)
movq %r11, 24(z)
movq %r12, 32(z)
movq %r13, 40(z)
// Restore registers and return
CFI_POP(%r13)
CFI_POP(%r12)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_demont_p384_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb | 57,090 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjdouble_alt(uint64_t p3[static 18],
// const uint64_t p1[static 18]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjdouble_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3 and %rsi = p1. The latter stays true
// throughout, but the montsqr macro below clobbers %rdi. Thus, we need
// to save %rdi and restore it before the writes to the outputs.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z2 (NUMSIZE*0)(%rsp)
#define y2 (NUMSIZE*1)(%rsp)
#define x2p (NUMSIZE*2)(%rsp)
#define xy2 (NUMSIZE*3)(%rsp)
#define y4 (NUMSIZE*4)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define dx2 (NUMSIZE*5)(%rsp)
#define t1 (NUMSIZE*5)(%rsp)
#define d (NUMSIZE*6)(%rsp)
#define x4p (NUMSIZE*6)(%rsp)
// Safe place for pointer to the output
#define input_z (NUMSIZE*7)(%rsp)
#define NSPACE 344
// Corresponds exactly to bignum_montmul_p384_alt
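// P0 := (P1 * P2 * 2^-384) mod p_384. Each row interleaves one word of
// schoolbook multiplication with one Montgomery reduction step. The
// per-row multiplier w is built from the running low word d0 by the
// shlq/addq pair as w = (2^32 + 1) * d0 mod 2^64: since
// p_384 = 2^32 - 1 (mod 2^64) and (2^32 + 1) * (2^32 - 1) = -1
// (mod 2^64), the constant 2^32 + 1 is exactly the negated inverse of
// p_384 mod 2^64 that Montgomery reduction requires.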
#define montmul_p384(P0,P1,P2) \
movq P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
xorl %r10d, %r10d ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
xorl %r11d, %r11d ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
xorl %r12d, %r12d ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
xorl %r13d, %r13d ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
xorl %r14d, %r14d ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
xorl %r15d, %r15d ; \
movq %r8, %rbx ; \
shlq $0x20, %rbx ; \
addq %r8, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r8 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r8, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbp, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x8+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %r8, %r8 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r8, %r8 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %r8, %r8 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r8, %r8 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r8, %r8 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r8, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r8, %r8 ; \
negq %r8; \
movq %r9, %rbx ; \
shlq $0x20, %rbx ; \
addq %r9, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r9 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r9, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbp, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r9, %r9 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %r9, %r9 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r9, %r9 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r9, %r9 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r9, %r9 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r9, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r9, %r9 ; \
negq %r9; \
movq %r10, %rbx ; \
shlq $0x20, %rbx ; \
addq %r10, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r10 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r10, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r11 ; \
sbbq %rdx, %r12 ; \
sbbq %rbp, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %r10, %r10 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r10, %r10 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r10, %r10 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r10, %r10 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r10, %r10 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r10, %rdx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %r10, %r10 ; \
negq %r10; \
movq %r11, %rbx ; \
shlq $0x20, %rbx ; \
addq %r11, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r11 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r11, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r12 ; \
sbbq %rdx, %r13 ; \
sbbq %rbp, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r11, %r11 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r11, %r11 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r11, %r11 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r11, %r11 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %r11, %r11 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r11, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %r11, %r11 ; \
negq %r11; \
movq %r12, %rbx ; \
shlq $0x20, %rbx ; \
addq %r12, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r12 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r12, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r13 ; \
sbbq %rdx, %r14 ; \
sbbq %rbp, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P2, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %r12, %r12 ; \
movq 0x10+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
sbbq %r12, %r12 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %r12, %r12 ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %r12, %r12 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %r12, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r12, %r12 ; \
negq %r12; \
movq %r13, %rbx ; \
shlq $0x20, %rbx ; \
addq %r13, %rbx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r13 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %r13, %rax ; \
adcq %rbx, %rdx ; \
adcl %ebp, %ebp ; \
subq %rax, %r14 ; \
sbbq %rdx, %r15 ; \
sbbq %rbp, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %rbx ; \
addq %rbx, %r11 ; \
adcq $0x0, %r12 ; \
xorl %edx, %edx ; \
xorl %ebp, %ebp ; \
xorl %r13d, %r13d ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %ebx ; \
adcq %r15, %rbx ; \
movl $0x1, %ecx ; \
adcq %r8, %rcx ; \
adcq %r9, %rdx ; \
adcq %r10, %rbp ; \
adcq %r11, %r13 ; \
adcq $0x0, %r12 ; \
cmovneq %rax, %r14 ; \
cmovneq %rbx, %r15 ; \
cmovneq %rcx, %r8 ; \
cmovneq %rdx, %r9 ; \
cmovneq %rbp, %r10 ; \
cmovneq %r13, %r11 ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %r8, 0x10+P0 ; \
movq %r9, 0x18+P0 ; \
movq %r10, 0x20+P0 ; \
movq %r11, 0x28+P0
// Corresponds exactly to bignum_montsqr_p384_alt except %rsi -> %rdi
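// Structure: the off-diagonal products are accumulated first and
// doubled, the six diagonal squares are then added in, and the
// resulting 12-word square goes through the same six word-by-word
// Montgomery reduction steps and final correction as montmul above.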
#define montsqr_p384(P0,P1) \
movq P1, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
movq %rax, %r9 ; \
movq %rdx, %r10 ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
movq %rax, %r11 ; \
movq %rdx, %r12 ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
movq %rax, %r13 ; \
movq %rdx, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P1; \
movq %rax, %r15 ; \
movq %rdx, %rcx ; \
movq 0x10+P1, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbp, %rbp ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rbp, %rbp ; \
movq 0x8+P1, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %rcx ; \
movq 0x20+P1, %rbx ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x10+P1, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rbx; \
subq %rbp, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %rcx ; \
sbbq %rbp, %rbp ; \
xorl %ebx, %ebx ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P1; \
subq %rbp, %rdx ; \
xorl %ebp, %ebp ; \
addq %rax, %rcx ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
xorl %r8d, %r8d ; \
addq %r9, %r9 ; \
adcq %r10, %r10 ; \
adcq %r11, %r11 ; \
adcq %r12, %r12 ; \
adcq %r13, %r13 ; \
adcq %r14, %r14 ; \
adcq %r15, %r15 ; \
adcq %rcx, %rcx ; \
adcq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcl %r8d, %r8d ; \
movq P1, %rax ; \
mulq %rax; \
movq %r8, P0 ; \
movq %rax, %r8 ; \
movq 0x8+P1, %rax ; \
movq %rbp, 0x8+P0 ; \
addq %rdx, %r9 ; \
sbbq %rbp, %rbp ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbp, %rbp ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq %rax, %rcx ; \
adcq %rdx, %rbx ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rax; \
negq %rbp; \
adcq 0x8+P0, %rax ; \
adcq P0, %rdx ; \
movq %rax, %rbp ; \
movq %rdx, %rdi ; \
movq %rbx, P0 ; \
movq %r8, %rbx ; \
shlq $0x20, %rbx ; \
addq %r8, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r8 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r8, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rax, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
movq %rbx, %r8 ; \
sbbq $0x0, %r8 ; \
movq %r9, %rbx ; \
shlq $0x20, %rbx ; \
addq %r9, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r9 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r9 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r9, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rax, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r8 ; \
movq %rbx, %r9 ; \
sbbq $0x0, %r9 ; \
movq %r10, %rbx ; \
shlq $0x20, %rbx ; \
addq %r10, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r10 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r10, %r11 ; \
sbbq %rdx, %r12 ; \
sbbq %rax, %r13 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
movq %rbx, %r10 ; \
sbbq $0x0, %r10 ; \
movq %r11, %rbx ; \
shlq $0x20, %rbx ; \
addq %r11, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r11 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r11 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r11, %r12 ; \
sbbq %rdx, %r13 ; \
sbbq %rax, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
movq %rbx, %r11 ; \
sbbq $0x0, %r11 ; \
movq %r12, %rbx ; \
shlq $0x20, %rbx ; \
addq %r12, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r12 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r12 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r12, %r13 ; \
sbbq %rdx, %r8 ; \
sbbq %rax, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %r11 ; \
movq %rbx, %r12 ; \
sbbq $0x0, %r12 ; \
movq %r13, %rbx ; \
shlq $0x20, %rbx ; \
addq %r13, %rbx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rbx; \
movq %rdx, %r13 ; \
movq $0xffffffff, %rax ; \
mulq %rbx; \
addq %rax, %r13 ; \
movl $0x0, %eax ; \
adcq %rbx, %rdx ; \
adcl %eax, %eax ; \
subq %r13, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rax, %r10 ; \
sbbq $0x0, %r11 ; \
sbbq $0x0, %r12 ; \
movq %rbx, %r13 ; \
sbbq $0x0, %r13 ; \
movq P0, %rbx ; \
addq %r8, %r14 ; \
adcq %r9, %r15 ; \
adcq %r10, %rcx ; \
adcq %r11, %rbx ; \
adcq %r12, %rbp ; \
adcq %r13, %rdi ; \
movl $0x0, %r8d ; \
adcq %r8, %r8 ; \
xorq %r11, %r11 ; \
xorq %r12, %r12 ; \
xorq %r13, %r13 ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %r9d ; \
adcq %r15, %r9 ; \
movl $0x1, %r10d ; \
adcq %rcx, %r10 ; \
adcq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq %rdi, %r13 ; \
adcq $0x0, %r8 ; \
cmovneq %rax, %r14 ; \
cmovneq %r9, %r15 ; \
cmovneq %r10, %rcx ; \
cmovneq %r11, %rbx ; \
cmovneq %r12, %rbp ; \
cmovneq %r13, %rdi ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %rcx, 0x10+P0 ; \
movq %rbx, 0x18+P0 ; \
movq %rbp, 0x20+P0 ; \
movq %rdi, 0x28+P0
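// Modular subtraction P0 := (P1 - P2) mod p_384 for reduced inputs:
// the borrow from the raw subtraction is turned into a mask, under
// which p_384 is added back, implemented as a masked subtraction of
// 2^384 - p_384 = [0xffffffff00000001; 0xffffffff; 1; 0; 0; 0].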
#define sub_p384(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
sbbq %rcx, %rcx ; \
movl $0xffffffff, %ebx ; \
andq %rbx, %rcx ; \
xorq %rbx, %rbx ; \
subq %rcx, %rbx ; \
subq %rbx, %rax ; \
movq %rax, P0 ; \
sbbq %rcx, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq %rax, %rax ; \
andq %rbx, %rcx ; \
negq %rax; \
sbbq %rcx, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0
// Simplified bignum_add_p384, without carry chain suspension
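// The reduction trick: after the raw 6-word addition (carry caught in
// %rdx), the constant 2^384 - p_384 = [0xffffffff00000001; 0xffffffff;
// 1; 0; 0; 0] (low to high) is added to the 7-word sum, with an extra
// all-1s word folded into the top; the top word thus ends up 0 when
// the sum was >= p_384 and all 1s otherwise, ready to serve as a mask.
// The masked subtraction at the end either keeps the reduced value or
// undoes the addition.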
#define add_p384(P0,P1,P2) \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
adcq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
adcq 0x28+P2, %r11 ; \
movl $0x0, %edx ; \
adcq %rdx, %rdx ; \
movq $0xffffffff00000001, %rbp ; \
addq %rbp, %rax ; \
movl $0xffffffff, %ebp ; \
adcq %rbp, %rcx ; \
adcq $0x1, %r8 ; \
adcq $0x0, %r9 ; \
adcq $0x0, %r10 ; \
adcq $0x0, %r11 ; \
adcq $0xffffffffffffffff, %rdx ; \
movl $1, %ebx ; \
andq %rdx, %rbx ; \
andq %rbp, %rdx ; \
xorq %rbp, %rbp ; \
subq %rdx, %rbp ; \
subq %rbp, %rax ; \
movq %rax, P0 ; \
sbbq %rdx, %rcx ; \
movq %rcx, 0x8+P0 ; \
sbbq %rbx, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0
// P0 = 4 * P1 - P2
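// Since P2 < 2^384, the addq $1 below biases the top word so that the
// 7-word value computed is 4 * P1 - P2 + 2^384 >= 0. The bias is
// cancelled either by the carry dropped in the reduction or by the
// final masked subtraction of 2^384 - p_384, which also serves as the
// correction step for the quotient estimate.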
#define cmsub41_p384(P0,P1,P2) \
movq 40+P1, %rcx ; \
movq %rcx, %r13 ; \
shrq $62, %rcx ; \
movq 32+P1, %r12 ; \
shldq $2, %r12, %r13 ; \
movq 24+P1, %r11 ; \
shldq $2, %r11, %r12 ; \
movq 16+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
addq $1, %rcx ; \
subq P2, %r8 ; \
sbbq 0x8+P2, %r9 ; \
sbbq 0x10+P2, %r10 ; \
sbbq 0x18+P2, %r11 ; \
sbbq 0x20+P2, %r12 ; \
sbbq 0x28+P2, %r13 ; \
sbbq $0, %rcx ; \
movq $0xffffffff00000001, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rcx, %r10 ; \
movq %rcx, %rax ; \
sbbq %rcx, %rcx ; \
movl $0xffffffff, %edx ; \
negq %rcx; \
mulq %rdx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %rcx, %r11 ; \
adcq $0x0, %r12 ; \
adcq $0x0, %r13 ; \
sbbq %rcx, %rcx ; \
notq %rcx; \
movl $0xffffffff, %edx ; \
xorq %rax, %rax ; \
andq %rcx, %rdx ; \
subq %rdx, %rax ; \
andq $0x1, %rcx ; \
subq %rax, %r8 ; \
movq %r8, P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rcx, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0
// P0 = C * P1 - D * P2
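// Inputs are assumed reduced, so p_384 - P2 is nonnegative and the
// computation actually performed is C * P1 + D * (p_384 - P2), which
// is congruent to C * P1 - D * P2 modulo p_384 and never goes
// negative; the 7-word result is then reduced with the usual
// quotient-estimate-and-masked-correction pattern.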
#define cmsub_p384(P0,C,P1,D,P2) \
movq $0x00000000ffffffff, %r9 ; \
subq P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq 8+P2, %r10 ; \
movq $0xfffffffffffffffe, %r11 ; \
sbbq 16+P2, %r11 ; \
movq $0xffffffffffffffff, %r12 ; \
sbbq 24+P2, %r12 ; \
movq $0xffffffffffffffff, %r13 ; \
sbbq 32+P2, %r13 ; \
movq $0xffffffffffffffff, %r14 ; \
sbbq 40+P2, %r14 ; \
movq $D, %rcx ; \
movq %r9, %rax ; \
mulq %rcx; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
movq %r10, %rax ; \
xorl %r10d, %r10d ; \
mulq %rcx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq %r11, %rax ; \
xorl %r11d, %r11d ; \
mulq %rcx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
movq %r12, %rax ; \
xorl %r12d, %r12d ; \
mulq %rcx; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
movq %r13, %rax ; \
xorl %r13d, %r13d ; \
mulq %rcx; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
movq %r14, %rax ; \
movl $1, %r14d ; \
mulq %rcx; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
movl $C, %ecx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbx, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbx, %rbx ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbx, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rbx, %rbx ; \
movq 0x20+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbx, %rbx ; \
movq 0x28+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
movq $0xffffffff00000001, %rax ; \
mulq %r14; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %r14, %r10 ; \
movq %r14, %rax ; \
sbbq %rcx, %rcx ; \
movl $0xffffffff, %edx ; \
negq %rcx; \
mulq %rdx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %rcx, %r11 ; \
adcq $0x0, %r12 ; \
adcq $0x0, %r13 ; \
sbbq %rcx, %rcx ; \
notq %rcx; \
movl $0xffffffff, %edx ; \
xorq %rax, %rax ; \
andq %rcx, %rdx ; \
subq %rdx, %rax ; \
andq $0x1, %rcx ; \
subq %rax, %r8 ; \
movq %r8, P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rcx, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0
// A weak version of add that only guarantees the sum fits in 6 digits
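// If the raw sum overflows 2^384 the masked correction below folds it
// back, so the output always fits in six words and is congruent to
// P1 + P2 modulo p_384, though it may not be fully reduced below
// p_384. That is enough for its single use below, where the result
// only feeds montmul_p384 together with a fully reduced operand.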
#define weakadd_p384(P0,P1,P2) \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
adcq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
adcq 0x28+P2, %r11 ; \
sbbq %rdx, %rdx ; \
movl $1, %ebx ; \
andq %rdx, %rbx ; \
movl $0xffffffff, %ebp ; \
andq %rbp, %rdx ; \
xorq %rbp, %rbp ; \
subq %rdx, %rbp ; \
addq %rbp, %rax ; \
movq %rax, P0 ; \
adcq %rdx, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %rbx, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
adcq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
adcq $0x0, %r11 ; \
movq %r11, 0x28+P0
// P0 = 3 * P1 - 8 * P2
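// As in cmsub_p384 above, P2 is first replaced by p_384 - P2 to keep
// the value nonnegative; the left shift by 3 then gives
// 8 * (p_384 - P2) in six words plus top bits in %r14, and the
// addq $1 biases the quotient estimate exactly as in cmsub41_p384.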
#define cmsub38_p384(P0,P1,P2) \
movq $0x00000000ffffffff, %r8 ; \
subq P2, %r8 ; \
movq $0xffffffff00000000, %r9 ; \
sbbq 8+P2, %r9 ; \
movq $0xfffffffffffffffe, %r10 ; \
sbbq 16+P2, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
sbbq 24+P2, %r11 ; \
movq $0xffffffffffffffff, %r12 ; \
sbbq 32+P2, %r12 ; \
movq $0xffffffffffffffff, %r13 ; \
sbbq 40+P2, %r13 ; \
movq %r13, %r14 ; \
shrq $61, %r14 ; \
shldq $3, %r12, %r13 ; \
shldq $3, %r11, %r12 ; \
shldq $3, %r10, %r11 ; \
shldq $3, %r9, %r10 ; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
addq $1, %r14 ; \
movl $3, %ecx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbx, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbx, %rbx ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbx, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rbx, %rbx ; \
movq 0x20+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbx, %rbx ; \
movq 0x28+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
movq $0xffffffff00000001, %rax ; \
mulq %r14; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %r14, %r10 ; \
movq %r14, %rax ; \
sbbq %rcx, %rcx ; \
movl $0xffffffff, %edx ; \
negq %rcx; \
mulq %rdx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %rcx, %r11 ; \
adcq $0x0, %r12 ; \
adcq $0x0, %r13 ; \
sbbq %rcx, %rcx ; \
notq %rcx; \
movl $0xffffffff, %edx ; \
xorq %rax, %rax ; \
andq %rcx, %rdx ; \
subq %rdx, %rax ; \
andq $0x1, %rcx ; \
subq %rax, %r8 ; \
movq %r8, P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rcx, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0
S2N_BN_SYMBOL(p384_montjdouble_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables
// Save the output pointer %rdi, which the operations below clobber
// before it is finally used for the output writes.
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdi, input_z
// Main code, just a sequence of basic field operations
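// In affine-style variables (the Montgomery scaling is preserved by
// every operation used), this is the standard a = -3 Jacobian
// doubling:
//
//      x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
//      m = 3 * x2p, s = 4 * x * y^2 = 4 * xy2
//      x' = m^2 - 2 * s = 9 * x4p - 8 * xy2 = 4 * xy2 - d
//      y' = m * (s - x') - 8 * y^4 = 3 * dx2 - 8 * y4
//      z' = 2 * y * z = (y + z)^2 - y^2 - z^2
//
// with d = 12 * xy2 - 9 * x4p and dx2 = d * x2p as computed below.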
// z2 = z^2
// y2 = y^2
montsqr_p384(z2,z_1)
montsqr_p384(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
weakadd_p384(t1,x_1,z2)
sub_p384(t2,x_1,z2)
montmul_p384(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p384(t1,y_1,z_1)
montsqr_p384(x4p,x2p)
montmul_p384(xy2,x_1,y2)
// t2 = (y + z)^2
montsqr_p384(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p384(d,12,xy2,9,x4p)
sub_p384(t1,t2,z2)
// y4 = y^4
montsqr_p384(y4,y2)
// Restore the output pointer to write to x_3, y_3 and z_3.
movq input_z, %rdi
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p384(z_3,t1,y2)
montmul_p384(dx2,d,x2p)
// x' = 4 * xy2 - d
cmsub41_p384(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_p384(y_3,dx2,y4)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p384_montjdouble_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_mod_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_384
// Input x[k]; output z[6]
//
// extern void bignum_mod_p384(uint64_t z[static 6], uint64_t k,
// const uint64_t *x);
//
// Standard x86-64 ABI: RDI = z, RSI = k, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = k, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384)
.text
#define z %rdi
#define k %rsi
#define x %rcx
#define m0 %r8
#define m1 %r9
#define m2 %r10
#define m3 %r11
#define m4 %r12
#define m5 %r13
#define d %r14
#define n0 %rax
#define n1 %rbx
#define n2 %rdx
#define q %rdx
#define n0short %eax
#define n1short %ebx
#define qshort %edx
S2N_BN_SYMBOL(bignum_mod_p384):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save extra registers
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
// If the input is already <= 5 words long, go to a trivial "copy" path
cmpq $6, k
jc Lbignum_mod_p384_shortinput
// Otherwise load the top 6 digits (top-down) and reduce k by 6
subq $6, k
movq 40(%rdx,k,8), m5
movq 32(%rdx,k,8), m4
movq 24(%rdx,k,8), m3
movq 16(%rdx,k,8), m2
movq 8(%rdx,k,8), m1
movq (%rdx,k,8), m0
// Move x into another register to leave %rdx free for multiplies and use of n2
movq %rdx, x
// Reduce the top 6 digits mod p_384 (a conditional subtraction of p_384)
movl $0x00000000ffffffff, n0short
movq $0xffffffff00000000, n1
movq $0xfffffffffffffffe, n2
subq n0, m0
sbbq n1, m1
sbbq n2, m2
sbbq $-1, m3
sbbq $-1, m4
sbbq $-1, m5
sbbq d, d
andq d, n0
andq d, n1
andq d, n2
addq n0, m0
adcq n1, m1
adcq n2, m2
adcq d, m3
adcq d, m4
adcq d, m5
// Now do (k-6) iterations of 7->6 word modular reduction
testq k, k
jz Lbignum_mod_p384_writeback
Lbignum_mod_p384_loop:
// Compute q = min(m5 + 1, 2^64 - 1)
movl $1, qshort
addq m5, q
sbbq d, d
orq d, q
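// Since m < 2^64 * 2^384 and p_384 is close to 2^384, this saturated
// estimate is the true quotient or one too large, and in the latter
// case m - q * p_384 undershoots by less than p_384; the masked
// addition of p_384 further down repairs exactly that case.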
// Load the next digit so current m to reduce = [m5;m4;m3;m2;m1;m0;d]
movq -8(x,k,8), d
// Now form [m5;m4;m3;m2;m1;m0;d] = m - q * p_384. To use an addition for
// the main calculation we do (m - 2^384 * q) + q * (2^384 - p_384)
// where 2^384 - p_384 = [0;0;0;1;0x00000000ffffffff;0xffffffff00000001].
// The extra subtraction of 2^384 * q is the first instruction.
subq q, m5
xorq n0, n0
movq $0xffffffff00000001, n0
mulxq n0, n0, n1
adcxq n0, d
adoxq n1, m0
movl $0x00000000ffffffff, n0short
mulxq n0, n0, n1
adcxq n0, m0
adoxq n1, m1
adcxq q, m1
movl $0, n0short
adoxq n0, n0
adcxq n0, m2
adcq $0, m3
adcq $0, m4
adcq $0, m5
// Now our top word m5 is either zero or all 1s. Use it for a masked
// addition of p_384, which we can do by a *subtraction* of
// 2^384 - p_384 from our portion
movq $0xffffffff00000001, n0
andq m5, n0
movl $0x00000000ffffffff, n1short
andq m5, n1
andq $1, m5
subq n0, d
sbbq n1, m0
sbbq m5, m1
sbbq $0, m2
sbbq $0, m3
sbbq $0, m4
// Now shuffle registers up and loop
movq m4, m5
movq m3, m4
movq m2, m3
movq m1, m2
movq m0, m1
movq d, m0
decq k
jnz Lbignum_mod_p384_loop
// Write back
Lbignum_mod_p384_writeback:
movq m0, (z)
movq m1, 8(z)
movq m2, 16(z)
movq m3, 24(z)
movq m4, 32(z)
movq m5, 40(z)
// Restore registers and return
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_p384)
Lbignum_mod_p384_shortinput:
xorq m0, m0
xorq m1, m1
xorq m2, m2
xorq m3, m3
xorq m4, m4
xorq m5, m5
testq k, k
jz Lbignum_mod_p384_writeback
movq (%rdx), m0
decq k
jz Lbignum_mod_p384_writeback
movq 8(%rdx), m1
decq k
jz Lbignum_mod_p384_writeback
movq 16(%rdx), m2
decq k
jz Lbignum_mod_p384_writeback
movq 24(%rdx), m3
decq k
jz Lbignum_mod_p384_writeback
movq 32(%rdx), m4
jmp Lbignum_mod_p384_writeback
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_optneg_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Optionally negate modulo p_384, z := (-x) mod p_384 (if p nonzero) or
// z := x (if p zero), assuming x reduced
// Inputs p, x[6]; output z[6]
//
// extern void bignum_optneg_p384(uint64_t z[static 6], uint64_t p,
// const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = p, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = p, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_p384)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_p384)
.text
#define z %rdi
#define q %rsi
#define x %rdx
#define n0 %rax
#define n1 %rcx
#define n2 %r8
#define n3 %r9
#define n4 %r10
#define n5 %r11
#define n0short %eax
S2N_BN_SYMBOL(bignum_optneg_p384):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Adjust q by zeroing it if the input is zero (to avoid giving -0 = p_384,
// which is not strictly reduced even though it's correct modulo p_384).
// This step is redundant if we know a priori that the input is nonzero, which
// is the case for the y coordinate of points on the P-384 curve, for example.
movq (x), n0
orq 8(x), n0
movq 16(x), n1
orq 24(x), n1
movq 32(x), n2
orq 40(x), n2
orq n1, n0
orq n2, n0
negq n0
sbbq n0, n0
andq n0, q
// Turn q into a bitmask, all 1s for q=false, all 0s for q=true
negq q
sbbq q, q
notq q
// Let [n5;n4;n3;n2;n1] = if q then p_384 else -1
movl $0x00000000ffffffff, n0short
orq q, n0
movq $0xffffffff00000000, n1
orq q, n1
movq $0xfffffffffffffffe, n2
orq q, n2
movq $0xffffffffffffffff, n3
movq n3, n4
movq n3, n5
// Subtract so [n5;n4;n3;n2;n1;n0] = if q then p_384 - x else -1 - x
subq (x), n0
sbbq 8(x), n1
sbbq 16(x), n2
sbbq 24(x), n3
sbbq 32(x), n4
sbbq 40(x), n5
// XOR the words with the bitmask, which in the case q = false has the
// effect of restoring ~(-1 - x) = -(-1 - x) - 1 = 1 + x - 1 = x
// and write back the digits to the output
xorq q, n0
movq n0, (z)
xorq q, n1
movq n1, 8(z)
xorq q, n2
movq n2, 16(z)
xorq q, n3
movq n3, 24(z)
xorq q, n4
movq n4, 32(z)
xorq q, n5
movq n5, 40(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_optneg_p384)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_half_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Halve modulo p_384, z := (x / 2) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
// extern void bignum_half_p384(uint64_t z[static 6], const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_p384)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_half_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_p384)
.text
#define z %rdi
#define x %rsi
#define a %rax
#define d0 %rcx
#define d1 %rdx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11
#define d0short %ecx
#define d3short %r9d
S2N_BN_SYMBOL(bignum_half_p384):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load lowest digit and get a mask for its lowest bit in d3
movq (x), a
movl $1, d3short
andq a, d3
negq d3
// Create a masked version of p_384 (top 3 words = the mask itself)
movl $0x00000000ffffffff, d0short
andq d3, d0
movq d0, d1
xorq d3, d1
movq d3, d2
addq d2, d2
andq d3, d2
movq d3, d4
movq d3, d5
// Perform addition with masked p_384. Catch the carry in a, as a bitmask
// for convenience though we only use its LSB below with SHRD
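// Since p_384 is odd, x + p_384 is even exactly when x is odd, so the
// masked addition always produces an even 385-bit value and the shift
// below is exact; the result (x + lsb(x) * p_384) / 2 is below p_384
// and equals x * 2^-1 mod p_384.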
addq a, d0
adcq 8(x), d1
adcq 16(x), d2
adcq 24(x), d3
adcq 32(x), d4
adcq 40(x), d5
sbbq a, a
// Shift right, pushing the carry back down, and store back
shrdq $1, d1, d0
movq d0, (z)
shrdq $1, d2, d1
movq d1, 8(z)
shrdq $1, d3, d2
movq d2, 16(z)
shrdq $1, d4, d3
movq d3, 24(z)
shrdq $1, d5, d4
movq d4, 32(z)
shrdq $1, a, d5
movq d5, 40(z)
// Return
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_half_p384)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjscalarmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for P-384
// Input scalar[6], point[18]; output res[18]
//
// extern void p384_montjscalarmul
// (uint64_t res[static 18],
// const uint64_t scalar[static 6],
// const uint64_t point[static 18]);
//
// This function is a variant of its affine point version p384_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^384 * x) mod p_384 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-384, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of p_384) then the output is guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point
// Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjscalarmul)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjscalarmul)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjscalarmul)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
#define JACSIZE (3*NUMSIZE)
// Intermediate variables on the stack.
// The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE
// Uppercase syntactic variants make x86_att version simpler to generate.
#define SCALARB (0*NUMSIZE)
#define scalarb (0*NUMSIZE)(%rsp)
#define ACC (1*NUMSIZE)
#define acc (1*NUMSIZE)(%rsp)
#define TABENT (4*NUMSIZE)
#define tabent (4*NUMSIZE)(%rsp)
#define TAB (7*NUMSIZE)
#define tab (7*NUMSIZE)(%rsp)
#define res (55*NUMSIZE)(%rsp)
#define NSPACE 56*NUMSIZE
// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.
#define selectblock_xz(I) \
cmpq $I, %rdi ; \
cmovzq TAB+JACSIZE*(I-1)(%rsp), %rax ; \
cmovzq TAB+JACSIZE*(I-1)+8(%rsp), %rbx ; \
cmovzq TAB+JACSIZE*(I-1)+16(%rsp), %rcx ; \
cmovzq TAB+JACSIZE*(I-1)+24(%rsp), %rdx ; \
cmovzq TAB+JACSIZE*(I-1)+32(%rsp), %r8 ; \
cmovzq TAB+JACSIZE*(I-1)+40(%rsp), %r9 ; \
cmovzq TAB+JACSIZE*(I-1)+96(%rsp), %r10 ; \
cmovzq TAB+JACSIZE*(I-1)+104(%rsp), %r11 ; \
cmovzq TAB+JACSIZE*(I-1)+112(%rsp), %r12 ; \
cmovzq TAB+JACSIZE*(I-1)+120(%rsp), %r13 ; \
cmovzq TAB+JACSIZE*(I-1)+128(%rsp), %r14 ; \
cmovzq TAB+JACSIZE*(I-1)+136(%rsp), %r15
#define selectblock_y(I) \
cmpq $I, %rdi ; \
cmovzq TAB+JACSIZE*(I-1)+48(%rsp), %rax ; \
cmovzq TAB+JACSIZE*(I-1)+56(%rsp), %rbx ; \
cmovzq TAB+JACSIZE*(I-1)+64(%rsp), %rcx ; \
cmovzq TAB+JACSIZE*(I-1)+72(%rsp), %rdx ; \
cmovzq TAB+JACSIZE*(I-1)+80(%rsp), %r8 ; \
cmovzq TAB+JACSIZE*(I-1)+88(%rsp), %r9
S2N_BN_SYMBOL(p384_montjscalarmul):
CFI_START
_CET_ENDBR
// The Windows version literally calls the standard ABI version.
// This simplifies the proofs since subroutine offsets are fixed.
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
CFI_CALL(Lp384_montjscalarmul_standard)
CFI_POP(%rsi)
CFI_POP(%rdi)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p384_montjscalarmul)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_standard)
Lp384_montjscalarmul_standard:
CFI_START
#endif
// Real start of the standard ABI code.
CFI_PUSH(%r15)
CFI_PUSH(%r14)
CFI_PUSH(%r13)
CFI_PUSH(%r12)
CFI_PUSH(%rbp)
CFI_PUSH(%rbx)
CFI_DEC_RSP(NSPACE)
// Preserve the "res" input argument; others get processed early.
movq %rdi, res
// Reduce the input scalar mod n_384, i.e. conditionally subtract n_384.
// Store it to "scalarb".
movq (%rsi), %r8
movq $0xecec196accc52973, %rax
subq %rax, %r8
movq 8(%rsi), %r9
movq $0x581a0db248b0a77a, %rax
sbbq %rax, %r9
movq 16(%rsi), %r10
movq $0xc7634d81f4372ddf, %rax
sbbq %rax, %r10
movq 24(%rsi), %r11
movq $0xffffffffffffffff, %rax
sbbq %rax, %r11
movq 32(%rsi), %r12
sbbq %rax, %r12
movq 40(%rsi), %r13
sbbq %rax, %r13
cmovcq (%rsi), %r8
cmovcq 8(%rsi), %r9
cmovcq 16(%rsi), %r10
cmovcq 24(%rsi), %r11
cmovcq 32(%rsi), %r12
cmovcq 40(%rsi), %r13
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
movq %r12, SCALARB+32(%rsp)
movq %r13, SCALARB+40(%rsp)
// Set the tab[0] table entry to the input point = 1 * P
movq (%rdx), %rax
movq %rax, TAB(%rsp)
movq 8(%rdx), %rax
movq %rax, TAB+8(%rsp)
movq 16(%rdx), %rax
movq %rax, TAB+16(%rsp)
movq 24(%rdx), %rax
movq %rax, TAB+24(%rsp)
movq 32(%rdx), %rax
movq %rax, TAB+32(%rsp)
movq 40(%rdx), %rax
movq %rax, TAB+40(%rsp)
movq 48(%rdx), %rax
movq %rax, TAB+48(%rsp)
movq 56(%rdx), %rax
movq %rax, TAB+56(%rsp)
movq 64(%rdx), %rax
movq %rax, TAB+64(%rsp)
movq 72(%rdx), %rax
movq %rax, TAB+72(%rsp)
movq 80(%rdx), %rax
movq %rax, TAB+80(%rsp)
movq 88(%rdx), %rax
movq %rax, TAB+88(%rsp)
movq 96(%rdx), %rax
movq %rax, TAB+96(%rsp)
movq 104(%rdx), %rax
movq %rax, TAB+104(%rsp)
movq 112(%rdx), %rax
movq %rax, TAB+112(%rsp)
movq 120(%rdx), %rax
movq %rax, TAB+120(%rsp)
movq 128(%rdx), %rax
movq %rax, TAB+128(%rsp)
movq 136(%rdx), %rax
movq %rax, TAB+136(%rsp)
// Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P
leaq TAB+JACSIZE*1(%rsp), %rdi
leaq TAB(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq TAB+JACSIZE*2(%rsp), %rdi
leaq TAB+JACSIZE*1(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
leaq TAB+JACSIZE*3(%rsp), %rdi
leaq TAB+JACSIZE*1(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq TAB+JACSIZE*4(%rsp), %rdi
leaq TAB+JACSIZE*3(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
leaq TAB+JACSIZE*5(%rsp), %rdi
leaq TAB+JACSIZE*2(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq TAB+JACSIZE*6(%rsp), %rdi
leaq TAB+JACSIZE*5(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
leaq TAB+JACSIZE*7(%rsp), %rdi
leaq TAB+JACSIZE*3(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq TAB+JACSIZE*8(%rsp), %rdi
leaq TAB+JACSIZE*7(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
leaq TAB+JACSIZE*9(%rsp), %rdi
leaq TAB+JACSIZE*4(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq TAB+JACSIZE*10(%rsp), %rdi
leaq TAB+JACSIZE*9(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
leaq TAB+JACSIZE*11(%rsp), %rdi
leaq TAB+JACSIZE*5(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq TAB+JACSIZE*12(%rsp), %rdi
leaq TAB+JACSIZE*11(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
leaq TAB+JACSIZE*13(%rsp), %rdi
leaq TAB+JACSIZE*6(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq TAB+JACSIZE*14(%rsp), %rdi
leaq TAB+JACSIZE*13(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
leaq TAB+JACSIZE*15(%rsp), %rdi
leaq TAB+JACSIZE*7(%rsp), %rsi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
// Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed
// digits. The digits of the constant, in lowest-to-highest order, are as
// follows; they are generated dynamically to use fewer large constant loads.
//
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x4210842108421084
// 0x8421084210842108
// 0x0842108421084210
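//
// After this addition the 385-bit sum splits into 77 windows of 5
// bits: the top window (bits 380..384) is used as-is, while each of
// the 76 lower windows f is used as the signed digit f - 16 in
// [-16,16] (the subq $16 in the main loop), and these digits
// recombine in base 32 to the original scalar. Negative digits are
// handled by negating the y coordinate of the selected table entry,
// using -(x,y,z) = (x,-y,z).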
movq $0x1084210842108421, %rax
movq %rax, %rcx
shrq $1, %rax
movq SCALARB(%rsp), %r8
addq %rax, %r8
movq SCALARB+8(%rsp), %r9
adcq %rcx, %r9
leaq (%rcx,%rcx), %rcx
movq SCALARB+16(%rsp), %r10
adcq %rcx, %r10
leaq (%rcx,%rcx), %rcx
movq SCALARB+24(%rsp), %r11
adcq %rcx, %r11
leaq (%rcx,%rcx), %rcx
movq SCALARB+32(%rsp), %r12
adcq %rcx, %r12
movq SCALARB+40(%rsp), %r13
adcq %rax, %r13
sbbq %rdi, %rdi
negq %rdi
// Record the top bitfield in %rdi then shift the whole scalar left 4 bits
// to align the top of the next bitfield with the MSB (bits 379..383).
shldq $4, %r13, %rdi
shldq $4, %r12, %r13
shldq $4, %r11, %r12
shldq $4, %r10, %r11
shldq $4, %r9, %r10
shldq $4, %r8, %r9
shlq $4, %r8
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
movq %r12, SCALARB+32(%rsp)
movq %r13, SCALARB+40(%rsp)
// Initialize the accumulator to the corresponding entry using constant-time
// lookup in the table. This top digit, uniquely, is not recoded so there is
// no sign adjustment to make. On the x86 integer side we don't have enough
// registers to hold all the fields; this could be better done with SIMD
// registers anyway. So we do x and z coordinates in one sweep, y in another
// (this is a rehearsal for below where we might need to negate the y).
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock_xz(1)
selectblock_xz(2)
selectblock_xz(3)
selectblock_xz(4)
selectblock_xz(5)
selectblock_xz(6)
selectblock_xz(7)
selectblock_xz(8)
selectblock_xz(9)
selectblock_xz(10)
selectblock_xz(11)
selectblock_xz(12)
selectblock_xz(13)
selectblock_xz(14)
selectblock_xz(15)
selectblock_xz(16)
movq %rax, ACC(%rsp)
movq %rbx, ACC+8(%rsp)
movq %rcx, ACC+16(%rsp)
movq %rdx, ACC+24(%rsp)
movq %r8, ACC+32(%rsp)
movq %r9, ACC+40(%rsp)
movq %r10, ACC+96(%rsp)
movq %r11, ACC+104(%rsp)
movq %r12, ACC+112(%rsp)
movq %r13, ACC+120(%rsp)
movq %r14, ACC+128(%rsp)
movq %r15, ACC+136(%rsp)
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
selectblock_y(1)
selectblock_y(2)
selectblock_y(3)
selectblock_y(4)
selectblock_y(5)
selectblock_y(6)
selectblock_y(7)
selectblock_y(8)
selectblock_y(9)
selectblock_y(10)
selectblock_y(11)
selectblock_y(12)
selectblock_y(13)
selectblock_y(14)
selectblock_y(15)
selectblock_y(16)
movq %rax, ACC+48(%rsp)
movq %rbx, ACC+56(%rsp)
movq %rcx, ACC+64(%rsp)
movq %rdx, ACC+72(%rsp)
movq %r8, ACC+80(%rsp)
movq %r9, ACC+88(%rsp)
// Main loop over size-5 bitfields: double 5 times then add signed digit
// At each stage we shift the scalar left by 5 bits so we can simply pick
// the top 5 bits as the bitfield, saving some fiddle over indexing.
movl $380, %ebp
Lp384_montjscalarmul_mainloop:
subq $5, %rbp
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_p384_montjdouble)
// Choose the bitfield and adjust it to sign and magnitude
movq SCALARB(%rsp), %r8
movq SCALARB+8(%rsp), %r9
movq SCALARB+16(%rsp), %r10
movq SCALARB+24(%rsp), %r11
movq SCALARB+32(%rsp), %r12
movq SCALARB+40(%rsp), %r13
movq %r13, %rdi
shrq $59, %rdi
shldq $5, %r12, %r13
shldq $5, %r11, %r12
shldq $5, %r10, %r11
shldq $5, %r9, %r10
shldq $5, %r8, %r9
shlq $5, %r8
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
movq %r12, SCALARB+32(%rsp)
movq %r13, SCALARB+40(%rsp)
subq $16, %rdi
sbbq %rsi, %rsi // %rsi = sign of digit (-1 = negative)
xorq %rsi, %rdi
subq %rsi, %rdi // %rdi = absolute value of digit
// Conditionally select the table entry tab[i-1] = i * P in constant time
// Again, this is done in two sweeps, first doing x and z then y.
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock_xz(1)
selectblock_xz(2)
selectblock_xz(3)
selectblock_xz(4)
selectblock_xz(5)
selectblock_xz(6)
selectblock_xz(7)
selectblock_xz(8)
selectblock_xz(9)
selectblock_xz(10)
selectblock_xz(11)
selectblock_xz(12)
selectblock_xz(13)
selectblock_xz(14)
selectblock_xz(15)
selectblock_xz(16)
movq %rax, TABENT(%rsp)
movq %rbx, TABENT+8(%rsp)
movq %rcx, TABENT+16(%rsp)
movq %rdx, TABENT+24(%rsp)
movq %r8, TABENT+32(%rsp)
movq %r9, TABENT+40(%rsp)
movq %r10, TABENT+96(%rsp)
movq %r11, TABENT+104(%rsp)
movq %r12, TABENT+112(%rsp)
movq %r13, TABENT+120(%rsp)
movq %r14, TABENT+128(%rsp)
movq %r15, TABENT+136(%rsp)
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
selectblock_y(1)
selectblock_y(2)
selectblock_y(3)
selectblock_y(4)
selectblock_y(5)
selectblock_y(6)
selectblock_y(7)
selectblock_y(8)
selectblock_y(9)
selectblock_y(10)
selectblock_y(11)
selectblock_y(12)
selectblock_y(13)
selectblock_y(14)
selectblock_y(15)
selectblock_y(16)
// Store it to "tabent" with the y coordinate optionally negated.
// This is done carefully to give coordinates < p_384 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
// The digits of the prime p_384 are generated dynamically from
// the zeroth via not/lea to reduce the number of constant loads.
movq %rax, %r10
orq %rbx, %r10
movq %rcx, %r11
orq %rdx, %r11
movq %r8, %r12
orq %r9, %r12
orq %r11, %r10
orq %r12, %r10
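// %r10 is zero here exactly when the selected y is zero; in that case
// clear the sign %rsi so the negation below is skipped and the stored
// y remains 0 rather than p_384 - 0 = p_384.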
cmovzq %r10, %rsi
movl $0xffffffff, %r10d
movq %r10, %r11
notq %r11
leaq (%r10,%r11), %r13
subq %rax, %r10
leaq -1(%r13), %r12
sbbq %rbx, %r11
movq %r13, %r14
sbbq %rcx, %r12
sbbq %rdx, %r13
movq %r14, %r15
sbbq %r8, %r14
sbbq %r9, %r15
testq %rsi, %rsi
cmovnzq %r10, %rax
cmovnzq %r11, %rbx
cmovnzq %r12, %rcx
cmovnzq %r13, %rdx
cmovnzq %r14, %r8
cmovnzq %r15, %r9
movq %rax, TABENT+48(%rsp)
movq %rbx, TABENT+56(%rsp)
movq %rcx, TABENT+64(%rsp)
movq %rdx, TABENT+72(%rsp)
movq %r8, TABENT+80(%rsp)
movq %r9, TABENT+88(%rsp)
// Add to the accumulator
leaq TABENT(%rsp), %rdx
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lp384_montjscalarmul_p384_montjadd)
testq %rbp, %rbp
jne Lp384_montjscalarmul_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
movq res, %rdi
movq ACC(%rsp), %rax
movq %rax, (%rdi)
movq ACC+8(%rsp), %rax
movq %rax, 8(%rdi)
movq ACC+16(%rsp), %rax
movq %rax, 16(%rdi)
movq ACC+24(%rsp), %rax
movq %rax, 24(%rdi)
movq ACC+32(%rsp), %rax
movq %rax, 32(%rdi)
movq ACC+40(%rsp), %rax
movq %rax, 40(%rdi)
movq ACC+48(%rsp), %rax
movq %rax, 48(%rdi)
movq ACC+56(%rsp), %rax
movq %rax, 56(%rdi)
movq ACC+64(%rsp), %rax
movq %rax, 64(%rdi)
movq ACC+72(%rsp), %rax
movq %rax, 72(%rdi)
movq ACC+80(%rsp), %rax
movq %rax, 80(%rdi)
movq ACC+88(%rsp), %rax
movq %rax, 88(%rdi)
movq ACC+96(%rsp), %rax
movq %rax, 96(%rdi)
movq ACC+104(%rsp), %rax
movq %rax, 104(%rdi)
movq ACC+112(%rsp), %rax
movq %rax, 112(%rdi)
movq ACC+120(%rsp), %rax
movq %rax, 120(%rdi)
movq ACC+128(%rsp), %rax
movq %rax, 128(%rdi)
movq ACC+136(%rsp), %rax
movq %rax, 136(%rdi)
// Restore stack and registers and return
CFI_INC_RSP(NSPACE)
CFI_POP(%rbx)
CFI_POP(%rbp)
CFI_POP(%r12)
CFI_POP(%r13)
CFI_POP(%r14)
CFI_POP(%r15)
CFI_RET
#if WINDOWS_ABI
S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_standard)
#else
S2N_BN_SIZE_DIRECTIVE(p384_montjscalarmul)
#endif
// Local copies of subroutines, complete clones at the moment
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_p384_montjadd)
Lp384_montjscalarmul_p384_montjadd:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(352)
movq %rsi, 0x150(%rsp)
movq %rdx, 0x158(%rsp)
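// First, an inlined Montgomery squaring in mulx/adcx/adox form of the
// first point's z coordinate (words at offsets 0x60..0x88 of the
// input), leaving z1^2 in the bottom six words of the stack frame.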
movq 0x60(%rsi), %rdx
mulxq 0x68(%rsi), %r9, %r10
mulxq 0x78(%rsi), %r11, %r12
mulxq 0x88(%rsi), %r13, %r14
movq 0x78(%rsi), %rdx
mulxq 0x80(%rsi), %r15, %rcx
xorl %ebp, %ebp
movq 0x70(%rsi), %rdx
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x68(%rsi), %rdx
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x88(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x80(%rsi), %rdx
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x70(%rsi), %rdx
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x88(%rsi), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x88(%rsi), %rdx
mulxq 0x80(%rsi), %rbx, %rbp
mulxq 0x78(%rsi), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0x60(%rsi), %rdx
mulxq 0x60(%rsi), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0x68(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x70(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x78(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x80(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x88(%rsi), %rdx
mulxq %rdx, %rax, %rsi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rsi
adoxq %rax, %rsi
movq %rbx, (%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq (%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
movabsq $0xffffffff00000001, %rax
movl $0xffffffff, %r9d
movl $0x1, %r10d
cmovaeq %r8, %rax
cmovaeq %r8, %r9
cmovaeq %r8, %r10
addq %rax, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r8, %rbx
adcq %r8, %rbp
adcq %r8, %rsi
movq %r14, (%rsp)
movq %r15, 0x8(%rsp)
movq %rcx, 0x10(%rsp)
movq %rbx, 0x18(%rsp)
movq %rbp, 0x20(%rsp)
movq %rsi, 0x28(%rsp)
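// The same inlined Montgomery squaring again for the second point,
// leaving z2^2 at offsets 0xf0..0x118 of the stack frame.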
movq 0x158(%rsp), %rsi
movq 0x60(%rsi), %rdx
mulxq 0x68(%rsi), %r9, %r10
mulxq 0x78(%rsi), %r11, %r12
mulxq 0x88(%rsi), %r13, %r14
movq 0x78(%rsi), %rdx
mulxq 0x80(%rsi), %r15, %rcx
xorl %ebp, %ebp
movq 0x70(%rsi), %rdx
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x68(%rsi), %rdx
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x88(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x80(%rsi), %rdx
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x70(%rsi), %rdx
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x88(%rsi), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x88(%rsi), %rdx
mulxq 0x80(%rsi), %rbx, %rbp
mulxq 0x78(%rsi), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0x60(%rsi), %rdx
mulxq 0x60(%rsi), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0x68(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x70(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x78(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x80(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x88(%rsi), %rdx
mulxq %rdx, %rax, %rsi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rsi
adoxq %rax, %rsi
movq %rbx, 0xf0(%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq 0xf0(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
movabsq $0xffffffff00000001, %rax
movl $0xffffffff, %r9d
movl $0x1, %r10d
cmovaeq %r8, %rax
cmovaeq %r8, %r9
cmovaeq %r8, %r10
addq %rax, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r8, %rbx
adcq %r8, %rbp
adcq %r8, %rsi
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %rbx, 0x108(%rsp)
movq %rbp, 0x110(%rsp)
movq %rsi, 0x118(%rsp)
movq 0x150(%rsp), %rsi
movq 0x158(%rsp), %rcx
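// Next, an inlined Montgomery multiplication of y_1 (words at
// 0x30..0x58 of the first point) by z_2 (words at 0x60..0x88 of the
// second point).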
movq 0x30(%rsi), %rdx
xorl %r15d, %r15d
mulxq 0x60(%rcx), %r8, %r9
mulxq 0x68(%rcx), %rbx, %r10
addq %rbx, %r9
mulxq 0x70(%rcx), %rbx, %r11
adcq %rbx, %r10
mulxq 0x78(%rcx), %rbx, %r12
adcq %rbx, %r11
mulxq 0x80(%rcx), %rbx, %r13
adcq %rbx, %r12
mulxq 0x88(%rcx), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x38(%rsi), %rdx
xorl %r8d, %r8d
mulxq 0x60(%rcx), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x68(%rcx), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x70(%rcx), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x78(%rcx), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x80(%rcx), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x88(%rcx), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x40(%rsi), %rdx
xorl %r9d, %r9d
mulxq 0x60(%rcx), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rcx), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x70(%rcx), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x78(%rcx), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x80(%rcx), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x88(%rcx), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x48(%rsi), %rdx
xorl %r10d, %r10d
mulxq 0x60(%rcx), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x68(%rcx), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x70(%rcx), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x78(%rcx), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x80(%rcx), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x88(%rcx), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x50(%rsi), %rdx
xorl %r11d, %r11d
mulxq 0x60(%rcx), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x68(%rcx), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x70(%rcx), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x78(%rcx), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x80(%rcx), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x88(%rcx), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x58(%rsi), %rdx
xorl %r12d, %r12d
mulxq 0x60(%rcx), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x68(%rcx), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x70(%rcx), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x78(%rcx), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x80(%rcx), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x88(%rcx), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x120(%rsp)
movq %r15, 0x128(%rsp)
movq %r8, 0x130(%rsp)
movq %r9, 0x138(%rsp)
movq %r10, 0x140(%rsp)
movq %r11, 0x148(%rsp)
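
// The symmetric product: y coordinate of the point at 0x158(%rsp)
// times z coordinate of the point at 0x150(%rsp), result to 0x30(%rsp).
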
movq 0x150(%rsp), %rsi
movq 0x158(%rsp), %rcx
movq 0x30(%rcx), %rdx
xorl %r15d, %r15d
mulxq 0x60(%rsi), %r8, %r9
mulxq 0x68(%rsi), %rbx, %r10
addq %rbx, %r9
mulxq 0x70(%rsi), %rbx, %r11
adcq %rbx, %r10
mulxq 0x78(%rsi), %rbx, %r12
adcq %rbx, %r11
mulxq 0x80(%rsi), %rbx, %r13
adcq %rbx, %r12
mulxq 0x88(%rsi), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x38(%rcx), %rdx
xorl %r8d, %r8d
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x70(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x88(%rsi), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x40(%rcx), %rdx
xorl %r9d, %r9d
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x70(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x88(%rsi), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x48(%rcx), %rdx
xorl %r10d, %r10d
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x70(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x88(%rsi), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x50(%rcx), %rdx
xorl %r11d, %r11d
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x70(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x88(%rsi), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x58(%rcx), %rdx
xorl %r12d, %r12d
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x70(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x88(%rsi), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movq %r9, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movq %r11, 0x58(%rsp)
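
// Montgomery multiplication of the x coordinate of the point at
// 0x158(%rsp) by the field element at (%rsp) (apparently the square
// of a z coordinate computed earlier), result to 0x60(%rsp).
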
movq 0x158(%rsp), %rcx
movq (%rcx), %rdx
xorl %r15d, %r15d
mulxq (%rsp), %r8, %r9
mulxq 0x8(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x10(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x18(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x20(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x28(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x8(%rcx), %rdx
xorl %r8d, %r8d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x10(%rcx), %rdx
xorl %r9d, %r9d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x18(%rcx), %rdx
xorl %r10d, %r10d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x20(%rcx), %rdx
xorl %r11d, %r11d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x28(%rcx), %rdx
xorl %r12d, %r12d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x60(%rsp)
movq %r15, 0x68(%rsp)
movq %r8, 0x70(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x80(%rsp)
movq %r11, 0x88(%rsp)
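
// Montgomery multiplication of the x coordinate of the point at
// 0x150(%rsp) by the square stored at 0xf0(%rsp) above, result to
// 0xc0(%rsp).
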
movq 0x150(%rsp), %rsi
movq (%rsi), %rdx
xorl %r15d, %r15d
mulxq 0xf0(%rsp), %r8, %r9
mulxq 0xf8(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x100(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x108(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x110(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x118(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x8(%rsi), %rdx
xorl %r8d, %r8d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x10(%rsi), %rdx
xorl %r9d, %r9d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x18(%rsi), %rdx
xorl %r10d, %r10d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x20(%rsi), %rdx
xorl %r11d, %r11d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x28(%rsi), %rdx
xorl %r12d, %r12d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %r8, 0xd0(%rsp)
movq %r9, 0xd8(%rsp)
movq %r10, 0xe0(%rsp)
movq %r11, 0xe8(%rsp)
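
// Montgomery multiplication of the temporaries at 0x30(%rsp) and
// (%rsp), writing the product back to 0x30(%rsp).
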
movq 0x30(%rsp), %rdx
xorl %r15d, %r15d
mulxq (%rsp), %r8, %r9
mulxq 0x8(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x10(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x18(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x20(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x28(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x38(%rsp), %rdx
xorl %r8d, %r8d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x40(%rsp), %rdx
xorl %r9d, %r9d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x48(%rsp), %rdx
xorl %r10d, %r10d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x50(%rsp), %rdx
xorl %r11d, %r11d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x58(%rsp), %rdx
xorl %r12d, %r12d
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x18(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x28(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movq %r9, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movq %r11, 0x58(%rsp)
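
// Montgomery multiplication of the temporaries at 0x120(%rsp) and
// 0xf0(%rsp), writing the product back to 0x120(%rsp).
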
movq 0x120(%rsp), %rdx
xorl %r15d, %r15d
mulxq 0xf0(%rsp), %r8, %r9
mulxq 0xf8(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x100(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x108(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x110(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x118(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x128(%rsp), %rdx
xorl %r8d, %r8d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x130(%rsp), %rdx
xorl %r9d, %r9d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x138(%rsp), %rdx
xorl %r10d, %r10d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x140(%rsp), %rdx
xorl %r11d, %r11d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x148(%rsp), %rdx
xorl %r12d, %r12d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x120(%rsp)
movq %r15, 0x128(%rsp)
movq %r8, 0x130(%rsp)
movq %r9, 0x138(%rsp)
movq %r10, 0x140(%rsp)
movq %r11, 0x148(%rsp)
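
// Subtraction modulo p_384: [0x60(%rsp)] - [0xc0(%rsp)] into
// 0xf0(%rsp). The borrow is turned into a mask in %rcx, from which the
// three nonzero words [1; 0xffffffff; 0xffffffff00000001] of
// 2^384 - p_384 are derived and subtracted, adding p_384 back in
// exactly on underflow.
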
movq 0x60(%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x68(%rsp), %rdx
sbbq 0xc8(%rsp), %rdx
movq 0x70(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x78(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movq 0x80(%rsp), %r10
sbbq 0xe0(%rsp), %r10
movq 0x88(%rsp), %r11
sbbq 0xe8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0xf0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xf8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x100(%rsp)
sbbq $0x0, %r9
movq %r9, 0x108(%rsp)
sbbq $0x0, %r10
movq %r10, 0x110(%rsp)
sbbq $0x0, %r11
movq %r11, 0x118(%rsp)
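
// Same pattern: [0x30(%rsp)] - [0x120(%rsp)] mod p_384, written back
// to 0x30(%rsp).
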
movq 0x30(%rsp), %rax
subq 0x120(%rsp), %rax
movq 0x38(%rsp), %rdx
sbbq 0x128(%rsp), %rdx
movq 0x40(%rsp), %r8
sbbq 0x130(%rsp), %r8
movq 0x48(%rsp), %r9
sbbq 0x138(%rsp), %r9
movq 0x50(%rsp), %r10
sbbq 0x140(%rsp), %r10
movq 0x58(%rsp), %r11
sbbq 0x148(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0x30(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x38(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x40(%rsp)
sbbq $0x0, %r9
movq %r9, 0x48(%rsp)
sbbq $0x0, %r10
movq %r10, 0x50(%rsp)
sbbq $0x0, %r11
movq %r11, 0x58(%rsp)
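
// Montgomery squaring of the difference at 0xf0(%rsp): form the
// off-diagonal products, double them while folding in the diagonal
// squares, then reduce exactly as in the multiplications above.
// Result to 0x90(%rsp).
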
movq 0xf0(%rsp), %rdx
mulxq 0xf8(%rsp), %r9, %r10
mulxq 0x108(%rsp), %r11, %r12
mulxq 0x118(%rsp), %r13, %r14
movq 0x108(%rsp), %rdx
mulxq 0x110(%rsp), %r15, %rcx
xorl %ebp, %ebp
movq 0x100(%rsp), %rdx
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0xf8(%rsp), %rdx
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x118(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x110(%rsp), %rdx
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x100(%rsp), %rdx
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x118(%rsp), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x118(%rsp), %rdx
mulxq 0x110(%rsp), %rbx, %rbp
mulxq 0x108(%rsp), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0xf0(%rsp), %rdx
mulxq 0xf0(%rsp), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0xf8(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x100(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x108(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x110(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x118(%rsp), %rdx
mulxq %rdx, %rax, %rsi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rsi
adoxq %rax, %rsi
movq %rbx, 0x90(%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq 0x90(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
movabsq $0xffffffff00000001, %rax
movl $0xffffffff, %r9d
movl $0x1, %r10d
cmovaeq %r8, %rax
cmovaeq %r8, %r9
cmovaeq %r8, %r10
addq %rax, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r8, %rbx
adcq %r8, %rbp
adcq %r8, %rsi
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq %rcx, 0xa0(%rsp)
movq %rbx, 0xa8(%rsp)
movq %rbp, 0xb0(%rsp)
movq %rsi, 0xb8(%rsp)
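
// Montgomery squaring of the difference at 0x30(%rsp), result to
// (%rsp). The final correction here carries an explicit seventh word
// in %r8 through the trial addition of 2^384 - p_384.
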
movq 0x30(%rsp), %rdx
mulxq 0x38(%rsp), %r9, %r10
mulxq 0x48(%rsp), %r11, %r12
mulxq 0x58(%rsp), %r13, %r14
movq 0x48(%rsp), %rdx
mulxq 0x50(%rsp), %r15, %rcx
xorl %ebp, %ebp
movq 0x40(%rsp), %rdx
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x38(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x38(%rsp), %rdx
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x58(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x50(%rsp), %rdx
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x40(%rsp), %rdx
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x58(%rsp), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x58(%rsp), %rdx
mulxq 0x50(%rsp), %rbx, %rbp
mulxq 0x48(%rsp), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0x30(%rsp), %rdx
mulxq 0x30(%rsp), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0x38(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x40(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x48(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x50(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x58(%rsp), %rdx
mulxq %rdx, %rax, %rsi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rsi
adoxq %rax, %rsi
movq %rbx, (%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq (%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rsi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rsi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rsi
movq %r14, (%rsp)
movq %r15, 0x8(%rsp)
movq %rcx, 0x10(%rsp)
movq %rbx, 0x18(%rsp)
movq %rbp, 0x20(%rsp)
movq %rsi, 0x28(%rsp)
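
// Montgomery multiplication of the temporaries at 0xc0(%rsp) and
// 0x90(%rsp), writing the product back to 0xc0(%rsp).
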
movq 0xc0(%rsp), %rdx
xorl %r15d, %r15d
mulxq 0x90(%rsp), %r8, %r9
mulxq 0x98(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0xa0(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0xa8(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0xb0(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0xb8(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0xc8(%rsp), %rdx
xorl %r8d, %r8d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0xd0(%rsp), %rdx
xorl %r9d, %r9d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0xd8(%rsp), %rdx
xorl %r10d, %r10d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0xe0(%rsp), %rdx
xorl %r11d, %r11d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0xe8(%rsp), %rdx
xorl %r12d, %r12d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %r8, 0xd0(%rsp)
movq %r9, 0xd8(%rsp)
movq %r10, 0xe0(%rsp)
movq %r11, 0xe8(%rsp)
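
// Montgomery multiplication of the temporaries at 0x60(%rsp) and
// 0x90(%rsp), writing the product back to 0x60(%rsp).
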
movq 0x60(%rsp), %rdx
xorl %r15d, %r15d
mulxq 0x90(%rsp), %r8, %r9
mulxq 0x98(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0xa0(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0xa8(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0xb0(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0xb8(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x68(%rsp), %rdx
xorl %r8d, %r8d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x70(%rsp), %rdx
xorl %r9d, %r9d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x78(%rsp), %rdx
xorl %r10d, %r10d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x80(%rsp), %rdx
xorl %r11d, %r11d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x88(%rsp), %rdx
xorl %r12d, %r12d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x60(%rsp)
movq %r15, 0x68(%rsp)
movq %r8, 0x70(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x80(%rsp)
movq %r11, 0x88(%rsp)
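
// Subtraction modulo p_384: [(%rsp)] - [0xc0(%rsp)], written back to
// (%rsp).
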
movq (%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x8(%rsp), %rdx
sbbq 0xc8(%rsp), %rdx
movq 0x10(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movq 0x20(%rsp), %r10
sbbq 0xe0(%rsp), %r10
movq 0x28(%rsp), %r11
sbbq 0xe8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, (%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x10(%rsp)
sbbq $0x0, %r9
movq %r9, 0x18(%rsp)
sbbq $0x0, %r10
movq %r10, 0x20(%rsp)
sbbq $0x0, %r11
movq %r11, 0x28(%rsp)
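
// Subtraction modulo p_384: [0x60(%rsp)] - [0xc0(%rsp)], result to
// 0x90(%rsp).
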
movq 0x60(%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x68(%rsp), %rdx
sbbq 0xc8(%rsp), %rdx
movq 0x70(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x78(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movq 0x80(%rsp), %r10
sbbq 0xe0(%rsp), %r10
movq 0x88(%rsp), %r11
sbbq 0xe8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0x90(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x98(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xa0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xa8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xb0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xb8(%rsp)
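
// Montgomery multiplication of the z coordinate (offset 0x60) of the
// point at 0x150(%rsp) by the temporary at 0xf0(%rsp), writing the
// product back to 0xf0(%rsp).
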
movq 0x150(%rsp), %rsi
movq 0x60(%rsi), %rdx
xorl %r15d, %r15d
mulxq 0xf0(%rsp), %r8, %r9
mulxq 0xf8(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x100(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x108(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x110(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x118(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x68(%rsi), %rdx
xorl %r8d, %r8d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x70(%rsi), %rdx
xorl %r9d, %r9d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x78(%rsi), %rdx
xorl %r10d, %r10d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x80(%rsi), %rdx
xorl %r11d, %r11d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x88(%rsi), %rdx
xorl %r12d, %r12d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0x108(%rsp)
movq %r10, 0x110(%rsp)
movq %r11, 0x118(%rsp)
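
// Subtraction modulo p_384: [(%rsp)] - [0x60(%rsp)], written back to
// (%rsp).
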
movq (%rsp), %rax
subq 0x60(%rsp), %rax
movq 0x8(%rsp), %rdx
sbbq 0x68(%rsp), %rdx
movq 0x10(%rsp), %r8
sbbq 0x70(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0x78(%rsp), %r9
movq 0x20(%rsp), %r10
sbbq 0x80(%rsp), %r10
movq 0x28(%rsp), %r11
sbbq 0x88(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, (%rsp)
sbbq %rcx, %rdx
movq %rdx, 0x8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x10(%rsp)
sbbq $0x0, %r9
movq %r9, 0x18(%rsp)
sbbq $0x0, %r10
movq %r10, 0x20(%rsp)
sbbq $0x0, %r11
movq %r11, 0x28(%rsp)
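
// Subtraction modulo p_384: [0xc0(%rsp)] - [(%rsp)], written back to
// 0xc0(%rsp).
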
movq 0xc0(%rsp), %rax
subq (%rsp), %rax
movq 0xc8(%rsp), %rdx
sbbq 0x8(%rsp), %rdx
movq 0xd0(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0xd8(%rsp), %r9
sbbq 0x18(%rsp), %r9
movq 0xe0(%rsp), %r10
sbbq 0x20(%rsp), %r10
movq 0xe8(%rsp), %r11
sbbq 0x28(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0xc0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xc8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xd0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xd8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xe0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xe8(%rsp)
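
// Montgomery multiplication of the temporaries at 0x120(%rsp) and
// 0x90(%rsp), in the same row-by-row pattern as the products above.
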
movq 0x120(%rsp), %rdx
xorl %r15d, %r15d
mulxq 0x90(%rsp), %r8, %r9
mulxq 0x98(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0xa0(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0xa8(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0xb0(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0xb8(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x128(%rsp), %rdx
xorl %r8d, %r8d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x130(%rsp), %rdx
xorl %r9d, %r9d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x138(%rsp), %rdx
xorl %r10d, %r10d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x140(%rsp), %rdx
xorl %r11d, %r11d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x148(%rsp), %rdx
xorl %r12d, %r12d
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0xb8(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movq %r9, 0xa8(%rsp)
movq %r10, 0xb0(%rsp)
movq %r11, 0xb8(%rsp)
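
// Montgomery multiplication by the z coordinate (offsets 0x60..0x88) of the point
// whose pointer is saved at 0x158(%rsp): [0xf0..0x118](%rsp) is multiplied in place.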
movq 0x158(%rsp), %rcx
movq 0x60(%rcx), %rdx
xorl %r15d, %r15d
mulxq 0xf0(%rsp), %r8, %r9
mulxq 0xf8(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x100(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x108(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x110(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x118(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x68(%rcx), %rdx
xorl %r8d, %r8d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x70(%rcx), %rdx
xorl %r9d, %r9d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x78(%rcx), %rdx
xorl %r10d, %r10d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x80(%rcx), %rdx
xorl %r11d, %r11d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x88(%rcx), %rdx
xorl %r12d, %r12d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0x108(%rsp)
movq %r10, 0x110(%rsp)
movq %r11, 0x118(%rsp)
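
// Montgomery multiplication: [0xc0..0xe8](%rsp) := [0xc0..0xe8] * [0x30..0x58] * 2^-384 mod p_384.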
movq 0xc0(%rsp), %rdx
xorl %r15d, %r15d
mulxq 0x30(%rsp), %r8, %r9
mulxq 0x38(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x40(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x48(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x50(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x58(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0xc8(%rsp), %rdx
xorl %r8d, %r8d
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x38(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x58(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0xd0(%rsp), %rdx
xorl %r9d, %r9d
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x38(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x58(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0xd8(%rsp), %rdx
xorl %r10d, %r10d
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x38(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x58(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0xe0(%rsp), %rdx
xorl %r11d, %r11d
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x38(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x58(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0xe8(%rsp), %rdx
xorl %r12d, %r12d
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x38(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x58(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %r8, 0xd0(%rsp)
movq %r9, 0xd8(%rsp)
movq %r10, 0xe0(%rsp)
movq %r11, 0xe8(%rsp)
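
// Modular subtraction: [0xc0..0xe8](%rsp) := ([0xc0..0xe8] - [0x90..0xb8])(%rsp) mod p_384.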
movq 0xc0(%rsp), %rax
subq 0x90(%rsp), %rax
movq 0xc8(%rsp), %rdx
sbbq 0x98(%rsp), %rdx
movq 0xd0(%rsp), %r8
sbbq 0xa0(%rsp), %r8
movq 0xd8(%rsp), %r9
sbbq 0xa8(%rsp), %r9
movq 0xe0(%rsp), %r10
sbbq 0xb0(%rsp), %r10
movq 0xe8(%rsp), %r11
sbbq 0xb8(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %esi
andq %rsi, %rcx
xorq %rsi, %rsi
subq %rcx, %rsi
subq %rsi, %rax
movq %rax, 0xc0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xc8(%rsp)
sbbq %rax, %rax
andq %rsi, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xd0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xd8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xe0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xe8(%rsp)
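
// Exceptional cases: build an all-ones mask in %rax (resp. %rdx) recording whether the
// z coordinate of the point at 0x158(%rsp) (resp. 0x150(%rsp)) is nonzero. If exactly
// one of the two is zero, substitute the other point's z for the computed one.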
movq 0x158(%rsp), %rcx
movq 0x60(%rcx), %r8
movq 0x68(%rcx), %r9
movq 0x70(%rcx), %r10
movq 0x78(%rcx), %r11
movq 0x80(%rcx), %rbx
movq 0x88(%rcx), %rbp
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rbx, %rax
orq %rbp, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
movq 0x150(%rsp), %rsi
movq 0x60(%rsi), %r12
movq 0x68(%rsi), %r13
movq 0x70(%rsi), %r14
movq 0x78(%rsi), %r15
movq 0x80(%rsi), %rdx
movq 0x88(%rsi), %rcx
cmoveq %r12, %r8
cmoveq %r13, %r9
cmoveq %r14, %r10
cmoveq %r15, %r11
cmoveq %rdx, %rbx
cmoveq %rcx, %rbp
orq %r13, %r12
orq %r15, %r14
orq %rcx, %rdx
orq %r14, %r12
orq %r12, %rdx
negq %rdx
sbbq %rdx, %rdx
cmpq %rdx, %rax
cmoveq 0xf0(%rsp), %r8
cmoveq 0xf8(%rsp), %r9
cmoveq 0x100(%rsp), %r10
cmoveq 0x108(%rsp), %r11
cmoveq 0x110(%rsp), %rbx
cmoveq 0x118(%rsp), %rbp
movq %r8, 0xf0(%rsp)
movq %r9, 0xf8(%rsp)
movq %r10, 0x100(%rsp)
movq %r11, 0x108(%rsp)
movq %rbx, 0x110(%rsp)
movq %rbp, 0x118(%rsp)
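
// Select the x and y coordinates on the same condition: the freshly computed values
// when both masks agree, otherwise the coordinates of whichever input point was not
// the point at infinity, and write the full output triple to (%rdi).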
movq 0x158(%rsp), %rcx
movq 0x150(%rsp), %rsi
movq (%rsp), %r8
cmovbq (%rsi), %r8
cmova (%rcx), %r8
movq 0x8(%rsp), %r9
cmovbq 0x8(%rsi), %r9
cmova 0x8(%rcx), %r9
movq 0x10(%rsp), %r10
cmovbq 0x10(%rsi), %r10
cmova 0x10(%rcx), %r10
movq 0x18(%rsp), %r11
cmovbq 0x18(%rsi), %r11
cmova 0x18(%rcx), %r11
movq 0x20(%rsp), %rbx
cmovbq 0x20(%rsi), %rbx
cmova 0x20(%rcx), %rbx
movq 0x28(%rsp), %rbp
cmovbq 0x28(%rsi), %rbp
cmova 0x28(%rcx), %rbp
movq 0xc0(%rsp), %r12
cmovbq 0x30(%rsi), %r12
cmova 0x30(%rcx), %r12
movq 0xc8(%rsp), %r13
cmovbq 0x38(%rsi), %r13
cmova 0x38(%rcx), %r13
movq 0xd0(%rsp), %r14
cmovbq 0x40(%rsi), %r14
cmova 0x40(%rcx), %r14
movq 0xd8(%rsp), %r15
cmovbq 0x48(%rsi), %r15
cmova 0x48(%rcx), %r15
movq 0xe0(%rsp), %rdx
cmovbq 0x50(%rsi), %rdx
cmova 0x50(%rcx), %rdx
movq 0xe8(%rsp), %rax
cmovbq 0x58(%rsi), %rax
cmova 0x58(%rcx), %rax
movq %r8, (%rdi)
movq %r9, 0x8(%rdi)
movq %r10, 0x10(%rdi)
movq %r11, 0x18(%rdi)
movq %rbx, 0x20(%rdi)
movq %rbp, 0x28(%rdi)
movq 0xf0(%rsp), %r8
movq 0xf8(%rsp), %r9
movq 0x100(%rsp), %r10
movq 0x108(%rsp), %r11
movq 0x110(%rsp), %rbx
movq 0x118(%rsp), %rbp
movq %r12, 0x30(%rdi)
movq %r13, 0x38(%rdi)
movq %r14, 0x40(%rdi)
movq %r15, 0x48(%rdi)
movq %rdx, 0x50(%rdi)
movq %rax, 0x58(%rdi)
movq %r8, 0x60(%rdi)
movq %r9, 0x68(%rdi)
movq %r10, 0x70(%rdi)
movq %r11, 0x78(%rdi)
movq %rbx, 0x80(%rdi)
movq %rbp, 0x88(%rdi)
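
// Epilogue of the point addition subroutine: restore the stack and callee-saved registers.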
CFI_INC_RSP(352)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_p384_montjadd)
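
// Local point doubling subroutine for Jacobian points in Montgomery form,
// used by the scalar multiplication mainline: %rdi = output point, %rsi = input point.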
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_p384_montjdouble)
Lp384_montjscalarmul_p384_montjdouble:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(344)
movq %rdi, 0x150(%rsp)
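
// Montgomery squaring of the z coordinate (0x60..0x88(%rsi)):
// [0x0..0x28](%rsp) := z^2 in Montgomery form.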
movq 0x60(%rsi), %rdx
mulxq 0x68(%rsi), %r9, %r10
mulxq 0x78(%rsi), %r11, %r12
mulxq 0x88(%rsi), %r13, %r14
movq 0x78(%rsi), %rdx
mulxq 0x80(%rsi), %r15, %rcx
xorl %ebp, %ebp
movq 0x70(%rsi), %rdx
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x68(%rsi), %rdx
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x88(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x80(%rsi), %rdx
mulxq 0x60(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x70(%rsi), %rdx
mulxq 0x78(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x80(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x88(%rsi), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x88(%rsi), %rdx
mulxq 0x80(%rsi), %rbx, %rbp
mulxq 0x78(%rsi), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0x60(%rsi), %rdx
mulxq 0x60(%rsi), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0x68(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x70(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x78(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x80(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x88(%rsi), %rdx
mulxq %rdx, %rax, %rdi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rdi
adoxq %rax, %rdi
movq %rbx, (%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq (%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, (%rsp)
movq %r15, 0x8(%rsp)
movq %rcx, 0x10(%rsp)
movq %rbx, 0x18(%rsp)
movq %rbp, 0x20(%rsp)
movq %rdi, 0x28(%rsp)
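
// Montgomery squaring of the y coordinate (0x30..0x58(%rsi)):
// [0x30..0x58](%rsp) := y^2 in Montgomery form.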
movq 0x30(%rsi), %rdx
mulxq 0x38(%rsi), %r9, %r10
mulxq 0x48(%rsi), %r11, %r12
mulxq 0x58(%rsi), %r13, %r14
movq 0x48(%rsi), %rdx
mulxq 0x50(%rsi), %r15, %rcx
xorl %ebp, %ebp
movq 0x40(%rsi), %rdx
mulxq 0x30(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x38(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x38(%rsi), %rdx
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x50(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x58(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x50(%rsi), %rdx
mulxq 0x30(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x40(%rsi), %rdx
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x50(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x58(%rsi), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x58(%rsi), %rdx
mulxq 0x50(%rsi), %rbx, %rbp
mulxq 0x48(%rsi), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0x30(%rsi), %rdx
mulxq 0x30(%rsi), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0x38(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x40(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x48(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x50(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x58(%rsi), %rdx
mulxq %rdx, %rax, %rdi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rdi
adoxq %rax, %rdi
movq %rbx, 0x30(%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq 0x30(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq %rcx, 0x40(%rsp)
movq %rbx, 0x48(%rsp)
movq %rbp, 0x50(%rsp)
movq %rdi, 0x58(%rsp)
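
// Modular addition: [0xf0..0x118](%rsp) := (x + z^2) mod p_384,
// x taken from 0x0..0x28(%rsi) and z^2 from 0x0..0x28(%rsp).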
movq (%rsi), %rax
addq (%rsp), %rax
movq 0x8(%rsi), %rcx
adcq 0x8(%rsp), %rcx
movq 0x10(%rsi), %r8
adcq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
adcq 0x18(%rsp), %r9
movq 0x20(%rsi), %r10
adcq 0x20(%rsp), %r10
movq 0x28(%rsi), %r11
adcq 0x28(%rsp), %r11
sbbq %rdx, %rdx
movl $0x1, %ebx
andq %rdx, %rbx
movl $0xffffffff, %ebp
andq %rbp, %rdx
xorq %rbp, %rbp
subq %rdx, %rbp
addq %rbp, %rax
movq %rax, 0xf0(%rsp)
adcq %rdx, %rcx
movq %rcx, 0xf8(%rsp)
adcq %rbx, %r8
movq %r8, 0x100(%rsp)
adcq $0x0, %r9
movq %r9, 0x108(%rsp)
adcq $0x0, %r10
movq %r10, 0x110(%rsp)
adcq $0x0, %r11
movq %r11, 0x118(%rsp)
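
// Modular subtraction: [0xc0..0xe8](%rsp) := (x - z^2) mod p_384.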
movq (%rsi), %rax
subq (%rsp), %rax
movq 0x8(%rsi), %rdx
sbbq 0x8(%rsp), %rdx
movq 0x10(%rsi), %r8
sbbq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
sbbq 0x18(%rsp), %r9
movq 0x20(%rsi), %r10
sbbq 0x20(%rsp), %r10
movq 0x28(%rsi), %r11
sbbq 0x28(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %ebx
andq %rbx, %rcx
xorq %rbx, %rbx
subq %rcx, %rbx
subq %rbx, %rax
movq %rax, 0xc0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xc8(%rsp)
sbbq %rax, %rax
andq %rbx, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0xd0(%rsp)
sbbq $0x0, %r9
movq %r9, 0xd8(%rsp)
sbbq $0x0, %r10
movq %r10, 0xe0(%rsp)
sbbq $0x0, %r11
movq %r11, 0xe8(%rsp)
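
// Montgomery multiplication: [0x60..0x88](%rsp) := (x - z^2) * (x + z^2) in Montgomery form.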
movq 0xc0(%rsp), %rdx
xorl %r15d, %r15d
mulxq 0xf0(%rsp), %r8, %r9
mulxq 0xf8(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x100(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x108(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x110(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x118(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0xc8(%rsp), %rdx
xorl %r8d, %r8d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0xd0(%rsp), %rdx
xorl %r9d, %r9d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0xd8(%rsp), %rdx
xorl %r10d, %r10d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0xe0(%rsp), %rdx
xorl %r11d, %r11d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0xe8(%rsp), %rdx
xorl %r12d, %r12d
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x118(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x60(%rsp)
movq %r15, 0x68(%rsp)
movq %r8, 0x70(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x80(%rsp)
movq %r11, 0x88(%rsp)
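
// Modular addition: [0xf0..0x118](%rsp) := (y + z) mod p_384,
// using the input coordinates at 0x30..0x58(%rsi) and 0x60..0x88(%rsi).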
movq 0x30(%rsi), %rax
addq 0x60(%rsi), %rax
movq 0x38(%rsi), %rcx
adcq 0x68(%rsi), %rcx
movq 0x40(%rsi), %r8
adcq 0x70(%rsi), %r8
movq 0x48(%rsi), %r9
adcq 0x78(%rsi), %r9
movq 0x50(%rsi), %r10
adcq 0x80(%rsi), %r10
movq 0x58(%rsi), %r11
adcq 0x88(%rsi), %r11
movl $0x0, %edx
adcq %rdx, %rdx
movabsq $0xffffffff00000001, %rbp
addq %rbp, %rax
movl $0xffffffff, %ebp
adcq %rbp, %rcx
adcq $0x1, %r8
adcq $0x0, %r9
adcq $0x0, %r10
adcq $0x0, %r11
adcq $0xffffffffffffffff, %rdx
movl $0x1, %ebx
andq %rdx, %rbx
andq %rbp, %rdx
xorq %rbp, %rbp
subq %rdx, %rbp
subq %rbp, %rax
movq %rax, 0xf0(%rsp)
sbbq %rdx, %rcx
movq %rcx, 0xf8(%rsp)
sbbq %rbx, %r8
movq %r8, 0x100(%rsp)
sbbq $0x0, %r9
movq %r9, 0x108(%rsp)
sbbq $0x0, %r10
movq %r10, 0x110(%rsp)
sbbq $0x0, %r11
movq %r11, 0x118(%rsp)
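
// Montgomery squaring: [0x120..0x148](%rsp) := ((x - z^2) * (x + z^2))^2 in Montgomery form.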
movq 0x60(%rsp), %rdx
mulxq 0x68(%rsp), %r9, %r10
mulxq 0x78(%rsp), %r11, %r12
mulxq 0x88(%rsp), %r13, %r14
movq 0x78(%rsp), %rdx
mulxq 0x80(%rsp), %r15, %rcx
xorl %ebp, %ebp
movq 0x70(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x68(%rsp), %rdx
mulxq 0x78(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x80(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x88(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x80(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x70(%rsp), %rdx
mulxq 0x78(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x80(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x88(%rsp), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x88(%rsp), %rdx
mulxq 0x80(%rsp), %rbx, %rbp
mulxq 0x78(%rsp), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0x60(%rsp), %rdx
mulxq 0x60(%rsp), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0x68(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x70(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x78(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x80(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x88(%rsp), %rdx
mulxq %rdx, %rax, %rdi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rdi
adoxq %rax, %rdi
movq %rbx, 0x120(%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq 0x120(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0x120(%rsp)
movq %r15, 0x128(%rsp)
movq %rcx, 0x130(%rsp)
movq %rbx, 0x138(%rsp)
movq %rbp, 0x140(%rsp)
movq %rdi, 0x148(%rsp)
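
// Montgomery multiplication: [0x90..0xb8](%rsp) := y^2 * x in Montgomery form
// (y^2 from 0x30..0x58(%rsp), x from 0x0..0x28(%rsi)).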
movq 0x30(%rsp), %rdx
xorl %r15d, %r15d
mulxq (%rsi), %r8, %r9
mulxq 0x8(%rsi), %rbx, %r10
addq %rbx, %r9
mulxq 0x10(%rsi), %rbx, %r11
adcq %rbx, %r10
mulxq 0x18(%rsi), %rbx, %r12
adcq %rbx, %r11
mulxq 0x20(%rsi), %rbx, %r13
adcq %rbx, %r12
mulxq 0x28(%rsi), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x38(%rsp), %rdx
xorl %r8d, %r8d
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x18(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x20(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x28(%rsi), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x40(%rsp), %rdx
xorl %r9d, %r9d
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x18(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x20(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x28(%rsi), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x48(%rsp), %rdx
xorl %r10d, %r10d
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x18(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x20(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x28(%rsi), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x50(%rsp), %rdx
xorl %r11d, %r11d
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x18(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x20(%rsi), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x28(%rsi), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x58(%rsp), %rdx
xorl %r12d, %r12d
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x18(%rsi), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x20(%rsi), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x28(%rsi), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movq %r9, 0xa8(%rsp)
movq %r10, 0xb0(%rsp)
movq %r11, 0xb8(%rsp)
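
// Montgomery squaring: [0xc0..0xe8](%rsp) := (y + z)^2 in Montgomery form.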
movq 0xf0(%rsp), %rdx
mulxq 0xf8(%rsp), %r9, %r10
mulxq 0x108(%rsp), %r11, %r12
mulxq 0x118(%rsp), %r13, %r14
movq 0x108(%rsp), %rdx
mulxq 0x110(%rsp), %r15, %rcx
xorl %ebp, %ebp
movq 0x100(%rsp), %rdx
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0xf8(%rsp), %rdx
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x118(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x110(%rsp), %rdx
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x100(%rsp), %rdx
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x118(%rsp), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x118(%rsp), %rdx
mulxq 0x110(%rsp), %rbx, %rbp
mulxq 0x108(%rsp), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0xf0(%rsp), %rdx
mulxq 0xf0(%rsp), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0xf8(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x100(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x108(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x110(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x118(%rsp), %rdx
mulxq %rdx, %rax, %rdi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rdi
adoxq %rax, %rdi
movq %rbx, 0xc0(%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq 0xc0(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
movq %rbx, 0xd8(%rsp)
movq %rbp, 0xe0(%rsp)
movq %rdi, 0xe8(%rsp)
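
// Combined multiply-subtract: [0x120..0x148](%rsp) :=
// (12 * [0x90..0xb8] - 9 * [0x120..0x148]) mod p_384, computed as
// 9 * (p_384 - t) + 12 * u followed by reduction of the seventh word.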
movabsq $0xffffffff, %r8
subq 0x120(%rsp), %r8
movabsq $0xffffffff00000000, %r9
sbbq 0x128(%rsp), %r9
movq $0xfffffffffffffffe, %r10
sbbq 0x130(%rsp), %r10
movq $0xffffffffffffffff, %r11
sbbq 0x138(%rsp), %r11
movq $0xffffffffffffffff, %r12
sbbq 0x140(%rsp), %r12
movq $0xffffffffffffffff, %r13
sbbq 0x148(%rsp), %r13
movq $0x9, %rdx
mulxq %r8, %r8, %rax
mulxq %r9, %r9, %rcx
addq %rax, %r9
mulxq %r10, %r10, %rax
adcq %rcx, %r10
mulxq %r11, %r11, %rcx
adcq %rax, %r11
mulxq %r12, %r12, %rax
adcq %rcx, %r12
mulxq %r13, %r13, %r14
adcq %rax, %r13
adcq $0x1, %r14
xorl %ecx, %ecx
movq $0xc, %rdx
mulxq 0x90(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x98(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb8(%rsp), %rax, %rdx
adcxq %rax, %r13
adoxq %r14, %rdx
adcxq %rcx, %rdx
xorq %rcx, %rcx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movl $0xffffffff, %eax
mulxq %rax, %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %rdx, %r10
movl $0x0, %eax
movl $0x0, %ecx
adoxq %rax, %rax
adcq %rax, %r11
adcq %rcx, %r12
adcq %rcx, %r13
adcq %rcx, %rcx
subq $0x1, %rcx
movl $0xffffffff, %edx
xorq %rax, %rax
andq %rcx, %rdx
subq %rdx, %rax
andq $0x1, %rcx
subq %rax, %r8
movq %r8, 0x120(%rsp)
sbbq %rdx, %r9
movq %r9, 0x128(%rsp)
sbbq %rcx, %r10
movq %r10, 0x130(%rsp)
sbbq $0x0, %r11
movq %r11, 0x138(%rsp)
sbbq $0x0, %r12
movq %r12, 0x140(%rsp)
sbbq $0x0, %r13
movq %r13, 0x148(%rsp)
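
// Modular subtraction: [0xf0..0x118](%rsp) := ((y + z)^2 - z^2) mod p_384.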
movq 0xc0(%rsp), %rax
subq (%rsp), %rax
movq 0xc8(%rsp), %rdx
sbbq 0x8(%rsp), %rdx
movq 0xd0(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0xd8(%rsp), %r9
sbbq 0x18(%rsp), %r9
movq 0xe0(%rsp), %r10
sbbq 0x20(%rsp), %r10
movq 0xe8(%rsp), %r11
sbbq 0x28(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %ebx
andq %rbx, %rcx
xorq %rbx, %rbx
subq %rcx, %rbx
subq %rbx, %rax
movq %rax, 0xf0(%rsp)
sbbq %rcx, %rdx
movq %rdx, 0xf8(%rsp)
sbbq %rax, %rax
andq %rbx, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x100(%rsp)
sbbq $0x0, %r9
movq %r9, 0x108(%rsp)
sbbq $0x0, %r10
movq %r10, 0x110(%rsp)
sbbq $0x0, %r11
movq %r11, 0x118(%rsp)
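
// Montgomery squaring: [0xc0..0xe8](%rsp) := (y^2)^2 = y^4 in Montgomery form.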
movq 0x30(%rsp), %rdx
mulxq 0x38(%rsp), %r9, %r10
mulxq 0x48(%rsp), %r11, %r12
mulxq 0x58(%rsp), %r13, %r14
movq 0x48(%rsp), %rdx
mulxq 0x50(%rsp), %r15, %rcx
xorl %ebp, %ebp
movq 0x40(%rsp), %rdx
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x38(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x38(%rsp), %rdx
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x58(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adcxq %rbp, %r15
adoxq %rbp, %rcx
adcq %rbp, %rcx
xorl %ebp, %ebp
movq 0x50(%rsp), %rdx
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq 0x40(%rsp), %rdx
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x58(%rsp), %rax, %rdx
adcxq %rax, %r15
adoxq %rdx, %rcx
movq 0x58(%rsp), %rdx
mulxq 0x50(%rsp), %rbx, %rbp
mulxq 0x48(%rsp), %rax, %rdx
adcxq %rax, %rcx
adoxq %rdx, %rbx
movl $0x0, %eax
adcxq %rax, %rbx
adoxq %rax, %rbp
adcq %rax, %rbp
xorq %rax, %rax
movq 0x30(%rsp), %rdx
mulxq 0x30(%rsp), %r8, %rax
adcxq %r9, %r9
adoxq %rax, %r9
movq 0x38(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x40(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x48(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %r15, %r15
adoxq %rdx, %r15
movq 0x50(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %rcx, %rcx
adoxq %rax, %rcx
adcxq %rbx, %rbx
adoxq %rdx, %rbx
movq 0x58(%rsp), %rdx
mulxq %rdx, %rax, %rdi
adcxq %rbp, %rbp
adoxq %rax, %rbp
movl $0x0, %eax
adcxq %rax, %rdi
adoxq %rax, %rdi
movq %rbx, 0xc0(%rsp)
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r8, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r8
addq %rbx, %rax
adcq %rdx, %r8
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r9
sbbq %r8, %r10
sbbq %rbx, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
movq %rdx, %r8
sbbq $0x0, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r9, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r9
addq %rbx, %rax
adcq %rdx, %r9
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r10
sbbq %r9, %r11
sbbq %rbx, %r12
sbbq $0x0, %r13
sbbq $0x0, %r8
movq %rdx, %r9
sbbq $0x0, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r10, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r10
addq %rbx, %rax
adcq %rdx, %r10
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r11
sbbq %r10, %r12
sbbq %rbx, %r13
sbbq $0x0, %r8
sbbq $0x0, %r9
movq %rdx, %r10
sbbq $0x0, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r11, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r11
addq %rbx, %rax
adcq %rdx, %r11
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r12
sbbq %r11, %r13
sbbq %rbx, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
movq %rdx, %r11
sbbq $0x0, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r12, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r12
addq %rbx, %rax
adcq %rdx, %r12
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r13
sbbq %r12, %r8
sbbq %rbx, %r9
sbbq $0x0, %r10
sbbq $0x0, %r11
movq %rdx, %r12
sbbq $0x0, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %r13, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %rbx, %r13
addq %rbx, %rax
adcq %rdx, %r13
movl $0x0, %ebx
adcq %rbx, %rbx
subq %rax, %r8
sbbq %r13, %r9
sbbq %rbx, %r10
sbbq $0x0, %r11
sbbq $0x0, %r12
movq %rdx, %r13
sbbq $0x0, %r13
movq 0xc0(%rsp), %rbx
addq %r8, %r14
adcq %r9, %r15
adcq %r10, %rcx
adcq %r11, %rbx
adcq %r12, %rbp
adcq %r13, %rdi
movl $0x0, %r8d
adcq %r8, %r8
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %r9d
adcq %r15, %r9
movl $0x1, %r10d
adcq %rcx, %r10
adcq %rbx, %r11
adcq %rbp, %r12
adcq %rdi, %r13
adcq $0x0, %r8
cmovneq %rax, %r14
cmovneq %r9, %r15
cmovneq %r10, %rcx
cmovneq %r11, %rbx
cmovneq %r12, %rbp
cmovneq %r13, %rdi
movq %r14, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
movq %rbx, 0xd8(%rsp)
movq %rbp, 0xe0(%rsp)
movq %rdi, 0xe8(%rsp)
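
// Reload the output pointer and form the z coordinate of the result:
// [0x60..0x88](%rdi) := ((y + z)^2 - z^2 - y^2) mod p_384, i.e. 2*y*z.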
movq 0x150(%rsp), %rdi
movq 0xf0(%rsp), %rax
subq 0x30(%rsp), %rax
movq 0xf8(%rsp), %rdx
sbbq 0x38(%rsp), %rdx
movq 0x100(%rsp), %r8
sbbq 0x40(%rsp), %r8
movq 0x108(%rsp), %r9
sbbq 0x48(%rsp), %r9
movq 0x110(%rsp), %r10
sbbq 0x50(%rsp), %r10
movq 0x118(%rsp), %r11
sbbq 0x58(%rsp), %r11
sbbq %rcx, %rcx
movl $0xffffffff, %ebx
andq %rbx, %rcx
xorq %rbx, %rbx
subq %rcx, %rbx
subq %rbx, %rax
movq %rax, 0x60(%rdi)
sbbq %rcx, %rdx
movq %rdx, 0x68(%rdi)
sbbq %rax, %rax
andq %rbx, %rcx
negq %rax
sbbq %rcx, %r8
movq %r8, 0x70(%rdi)
sbbq $0x0, %r9
movq %r9, 0x78(%rdi)
sbbq $0x0, %r10
movq %r10, 0x80(%rdi)
sbbq $0x0, %r11
movq %r11, 0x88(%rdi)
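
// Montgomery multiplication: [0xf0..0x118](%rsp) := [0x60..0x88] * [0x120..0x148]
// in Montgomery form.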
movq 0x60(%rsp), %rdx
xorl %r15d, %r15d
mulxq 0x120(%rsp), %r8, %r9
mulxq 0x128(%rsp), %rbx, %r10
addq %rbx, %r9
mulxq 0x130(%rsp), %rbx, %r11
adcq %rbx, %r10
mulxq 0x138(%rsp), %rbx, %r12
adcq %rbx, %r11
mulxq 0x140(%rsp), %rbx, %r13
adcq %rbx, %r12
mulxq 0x148(%rsp), %rbx, %r14
adcq %rbx, %r13
adcq %r15, %r14
movq %r8, %rdx
shlq $0x20, %rdx
addq %r8, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r8, %rbx
adcq %r8, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r9
sbbq %rbx, %r10
sbbq %rbp, %r11
sbbq $0x0, %r12
sbbq $0x0, %r13
sbbq $0x0, %rdx
addq %rdx, %r14
adcq $0x0, %r15
movq 0x68(%rsp), %rdx
xorl %r8d, %r8d
mulxq 0x120(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x128(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x130(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x138(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x140(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
adoxq %r8, %r15
mulxq 0x148(%rsp), %rax, %rbx
adcq %rax, %r14
adcq %rbx, %r15
adcq %r8, %r8
movq %r9, %rdx
shlq $0x20, %rdx
addq %r9, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r9, %rbx
adcq %r9, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r10
sbbq %rbx, %r11
sbbq %rbp, %r12
sbbq $0x0, %r13
sbbq $0x0, %r14
sbbq $0x0, %rdx
addq %rdx, %r15
adcq $0x0, %r8
movq 0x70(%rsp), %rdx
xorl %r9d, %r9d
mulxq 0x120(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x128(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x130(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x138(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x140(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
adoxq %r9, %r8
mulxq 0x148(%rsp), %rax, %rbx
adcq %rax, %r15
adcq %rbx, %r8
adcq %r9, %r9
movq %r10, %rdx
shlq $0x20, %rdx
addq %r10, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r10, %rbx
adcq %r10, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r11
sbbq %rbx, %r12
sbbq %rbp, %r13
sbbq $0x0, %r14
sbbq $0x0, %r15
sbbq $0x0, %rdx
addq %rdx, %r8
adcq $0x0, %r9
movq 0x78(%rsp), %rdx
xorl %r10d, %r10d
mulxq 0x120(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x128(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x130(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x138(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x140(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
adoxq %r10, %r9
mulxq 0x148(%rsp), %rax, %rbx
adcq %rax, %r8
adcq %rbx, %r9
adcq %r10, %r10
movq %r11, %rdx
shlq $0x20, %rdx
addq %r11, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r11, %rbx
adcq %r11, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rbp, %r14
sbbq $0x0, %r15
sbbq $0x0, %r8
sbbq $0x0, %rdx
addq %rdx, %r9
adcq $0x0, %r10
movq 0x80(%rsp), %rdx
xorl %r11d, %r11d
mulxq 0x120(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x128(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x130(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x138(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x140(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
adoxq %r11, %r10
mulxq 0x148(%rsp), %rax, %rbx
adcq %rax, %r9
adcq %rbx, %r10
adcq %r11, %r11
movq %r12, %rdx
shlq $0x20, %rdx
addq %r12, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r12, %rbx
adcq %r12, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r13
sbbq %rbx, %r14
sbbq %rbp, %r15
sbbq $0x0, %r8
sbbq $0x0, %r9
sbbq $0x0, %rdx
addq %rdx, %r10
adcq $0x0, %r11
movq 0x88(%rsp), %rdx
xorl %r12d, %r12d
mulxq 0x120(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x128(%rsp), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x130(%rsp), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x138(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x140(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
adoxq %r12, %r11
mulxq 0x148(%rsp), %rax, %rbx
adcq %rax, %r10
adcq %rbx, %r11
adcq %r12, %r12
movq %r13, %rdx
shlq $0x20, %rdx
addq %r13, %rdx
xorl %ebp, %ebp
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rbx, %rax
movl $0xffffffff, %ebx
mulxq %rbx, %r13, %rbx
adcq %r13, %rax
adcq %rdx, %rbx
adcl %ebp, %ebp
subq %rax, %r14
sbbq %rbx, %r15
sbbq %rbp, %r8
sbbq $0x0, %r9
sbbq $0x0, %r10
sbbq $0x0, %rdx
addq %rdx, %r11
adcq $0x0, %r12
xorl %edx, %edx
xorl %ebp, %ebp
xorl %r13d, %r13d
movabsq $0xffffffff00000001, %rax
addq %r14, %rax
movl $0xffffffff, %ebx
adcq %r15, %rbx
movl $0x1, %ecx
adcq %r8, %rcx
adcq %r9, %rdx
adcq %r10, %rbp
adcq %r11, %r13
adcq $0x0, %r12
cmovneq %rax, %r14
cmovneq %rbx, %r15
cmovneq %rcx, %r8
cmovneq %rdx, %r9
cmovneq %rbp, %r10
cmovneq %r13, %r11
movq %r14, 0xf0(%rsp)
movq %r15, 0xf8(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0x108(%rsp)
movq %r10, 0x110(%rsp)
movq %r11, 0x118(%rsp)
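// 4 * P1 - P2 mod p_384 (the cmsub41_p384 pattern), with
// P1 = [0x90(%rsp)], P2 = [0x120(%rsp)] and result in [(%rdi)]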
movq 0xb8(%rsp), %rdx
movq %rdx, %r13
shrq $0x3e, %rdx
movq 0xb0(%rsp), %r12
shldq $0x2, %r12, %r13
movq 0xa8(%rsp), %r11
shldq $0x2, %r11, %r12
movq 0xa0(%rsp), %r10
shldq $0x2, %r10, %r11
movq 0x98(%rsp), %r9
shldq $0x2, %r9, %r10
movq 0x90(%rsp), %r8
shldq $0x2, %r8, %r9
shlq $0x2, %r8
addq $0x1, %rdx
subq 0x120(%rsp), %r8
sbbq 0x128(%rsp), %r9
sbbq 0x130(%rsp), %r10
sbbq 0x138(%rsp), %r11
sbbq 0x140(%rsp), %r12
sbbq 0x148(%rsp), %r13
sbbq $0x0, %rdx
xorq %rcx, %rcx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movl $0xffffffff, %eax
mulxq %rax, %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %rdx, %r10
movl $0x0, %eax
movl $0x0, %ecx
adoxq %rax, %rax
adcq %rax, %r11
adcq %rcx, %r12
adcq %rcx, %r13
adcq %rcx, %rcx
subq $0x1, %rcx
movl $0xffffffff, %edx
xorq %rax, %rax
andq %rcx, %rdx
subq %rdx, %rax
andq $0x1, %rcx
subq %rax, %r8
movq %r8, (%rdi)
sbbq %rdx, %r9
movq %r9, 0x8(%rdi)
sbbq %rcx, %r10
movq %r10, 0x10(%rdi)
sbbq $0x0, %r11
movq %r11, 0x18(%rdi)
sbbq $0x0, %r12
movq %r12, 0x20(%rdi)
sbbq $0x0, %r13
movq %r13, 0x28(%rdi)
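// 3 * P1 - 8 * P2 mod p_384 (the cmsub38_p384 pattern), with
// P1 = [0xf0(%rsp)], P2 = [0xc0(%rsp)] and result in [0x30(%rdi)]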
movabsq $0xffffffff, %r8
subq 0xc0(%rsp), %r8
movabsq $0xffffffff00000000, %r9
sbbq 0xc8(%rsp), %r9
movq $0xfffffffffffffffe, %r10
sbbq 0xd0(%rsp), %r10
movq $0xffffffffffffffff, %r11
sbbq 0xd8(%rsp), %r11
movq $0xffffffffffffffff, %r12
sbbq 0xe0(%rsp), %r12
movq $0xffffffffffffffff, %r13
sbbq 0xe8(%rsp), %r13
movq %r13, %r14
shrq $0x3d, %r14
shldq $0x3, %r12, %r13
shldq $0x3, %r11, %r12
shldq $0x3, %r10, %r11
shldq $0x3, %r9, %r10
shldq $0x3, %r8, %r9
shlq $0x3, %r8
addq $0x1, %r14
xorl %ecx, %ecx
movq $0x3, %rdx
mulxq 0xf0(%rsp), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0xf8(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x100(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x108(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x110(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x118(%rsp), %rax, %rdx
adcxq %rax, %r13
adoxq %r14, %rdx
adcxq %rcx, %rdx
xorq %rcx, %rcx
movabsq $0xffffffff00000001, %rax
mulxq %rax, %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
movl $0xffffffff, %eax
mulxq %rax, %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
adcxq %rdx, %r10
movl $0x0, %eax
movl $0x0, %ecx
adoxq %rax, %rax
adcq %rax, %r11
adcq %rcx, %r12
adcq %rcx, %r13
adcq %rcx, %rcx
subq $0x1, %rcx
movl $0xffffffff, %edx
xorq %rax, %rax
andq %rcx, %rdx
subq %rdx, %rax
andq $0x1, %rcx
subq %rax, %r8
movq %r8, 0x30(%rdi)
sbbq %rdx, %r9
movq %r9, 0x38(%rdi)
sbbq %rcx, %r10
movq %r10, 0x40(%rdi)
sbbq $0x0, %r11
movq %r11, 0x48(%rdi)
sbbq $0x0, %r12
movq %r12, 0x50(%rdi)
sbbq $0x0, %r13
movq %r13, 0x58(%rdi)
CFI_INC_RSP(344)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_p384_montjdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
|
wlsfx/bnbb
| 2,133
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_mux_6.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// 384-bit multiplex/select z := x (if p nonzero) or z := y (if p zero)
// Inputs p, x[6], y[6]; output z[6]
//
// extern void bignum_mux_6(uint64_t p, uint64_t z[static 6],
// const uint64_t x[static 6],
// const uint64_t y[static 6]);
//
// It is assumed that all numbers x, y and z have the same size 6 digits.
//
// Standard x86-64 ABI: RDI = p, RSI = z, RDX = x, RCX = y
// Microsoft x64 ABI: RCX = p, RDX = z, R8 = x, R9 = y
// ----------------------------------------------------------------------------
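// For orientation, a minimal C model of the same selection (an
// illustrative sketch only; mux_6 is a hypothetical name, and unlike
// the cmovz-based code below, plain C selection is not guaranteed
// to compile to constant-time code):
//
//   #include <stdint.h>
//
//   void mux_6(uint64_t p, uint64_t z[6],
//              const uint64_t x[6], const uint64_t y[6])
//   {
//       for (int i = 0; i < 6; i++)      /* one cmovzq per digit */
//           z[i] = p ? x[i] : y[i];
//   }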
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mux_6)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mux_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mux_6)
.text
#define p %rdi
#define z %rsi
#define x %rdx
#define y %rcx
#define a %rax
#define b %r8
S2N_BN_SYMBOL(bignum_mux_6):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif
testq p, p
movq (x), a
movq (y), b
cmovzq b, a
movq a, (z)
movq 8(x), a
movq 8(y), b
cmovzq b, a
movq a, 8(z)
movq 16(x), a
movq 16(y), b
cmovzq b, a
movq a, 16(z)
movq 24(x), a
movq 24(y), b
cmovzq b, a
movq a, 24(z)
movq 32(x), a
movq 32(y), b
cmovzq b, a
movq a, 32(z)
movq 40(x), a
movq 40(y), b
cmovzq b, a
movq a, 40(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mux_6)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
wlsfx/bnbb
| 5,585
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_mod_n384_alt.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_384
// Input x[k]; output z[6]
//
// extern void bignum_mod_n384_alt(uint64_t z[static 6], uint64_t k,
// const uint64_t *x);
//
// Reduction is modulo the group order of the NIST curve P-384.
//
// Standard x86-64 ABI: RDI = z, RSI = k, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = k, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n384_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n384_alt)
.text
#define z %rdi
#define k %rsi
#define x %rcx
#define m0 %r8
#define m1 %r9
#define m2 %r10
#define m3 %r11
#define m4 %r12
#define m5 %r13
#define d %r14
#define n0 %rax
#define n1 %rbx
#define n2 %rdx
#define q %rbp
#define c %rbx
#define n0short %eax
#define qshort %ebp
S2N_BN_SYMBOL(bignum_mod_n384_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save extra registers
CFI_PUSH(%rbp)
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
// If the input is already <= 5 words long, go to a trivial "copy" path
cmpq $6, k
jc Lbignum_mod_n384_alt_shortinput
// Otherwise load the top 6 digits (top-down) and reduce k by 6
subq $6, k
movq 40(%rdx,k,8), m5
movq 32(%rdx,k,8), m4
movq 24(%rdx,k,8), m3
movq 16(%rdx,k,8), m2
movq 8(%rdx,k,8), m1
movq (%rdx,k,8), m0
// Move x into another register to leave %rdx free for multiplies and use of n2
movq %rdx, x
// Reduce the top 6 digits mod n_384 (a conditional subtraction of n_384)
movq $0x1313e695333ad68d, n0
movq $0xa7e5f24db74f5885, n1
movq $0x389cb27e0bc8d220, n2
addq n0, m0
adcq n1, m1
adcq n2, m2
adcq $0, m3
adcq $0, m4
adcq $0, m5
sbbq d, d
notq d
andq d, n0
andq d, n1
andq d, n2
subq n0, m0
sbbq n1, m1
sbbq n2, m2
sbbq $0, m3
sbbq $0, m4
sbbq $0, m5
// Now do (k-6) iterations of 7->6 word modular reduction
testq k, k
jz Lbignum_mod_n384_alt_writeback
Lbignum_mod_n384_alt_loop:
// Compute q = min(m5 + 1, 2^64 - 1)
movl $1, qshort
addq m5, q
sbbq d, d
orq d, q
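// In C terms the four instructions above amount to the following
// illustrative sketch (clamping the tentative quotient on wraparound):
//
//   uint64_t q = m5 + 1;            /* tentative quotient        */
//   if (q == 0) q = UINT64_MAX;     /* q = min(m5 + 1, 2^64 - 1) */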
// Load the next digit, so the current m to reduce is [m5;m4;m3;m2;m1;m0;d]
movq -8(x,k,8), d
// Now form [m5;m4;m3;m2;m1;m0;d] = m - q * n_384
subq q, m5
movq $0x1313e695333ad68d, %rax
mulq q
addq %rax, d
adcq %rdx, m0
sbbq c, c
movq $0xa7e5f24db74f5885, %rax
mulq q
subq c, %rdx
addq %rax, m0
adcq %rdx, m1
sbbq c, c
movq $0x389cb27e0bc8d220, n0
mulq q
subq c, %rdx
addq %rax, m1
adcq %rdx, m2
adcq $0, m3
adcq $0, m4
adcq $0, m5
// Now our top word m5 is either zero or all 1s. Use it for a masked
// addition of n_384, which we can do by a *subtraction* of
// 2^384 - n_384 from our portion
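// An illustrative C sketch of this masked fixup, with the three
// constants being the low words of r = 2^384 - n_384:
//
//   uint64_t mask = m5;                            /* 0 or all 1s */
//   uint64_t r0 = UINT64_C(0x1313e695333ad68d) & mask;
//   uint64_t r1 = UINT64_C(0xa7e5f24db74f5885) & mask;
//   uint64_t r2 = UINT64_C(0x389cb27e0bc8d220) & mask;
//   /* borrow-subtract [0;0;0;r2;r1;r0] from the six low words
//      [m4;m3;m2;m1;m0;d]; when mask is all 1s the value was
//      negative by 2^384, so this nets out to adding n_384 */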
movq $0x1313e695333ad68d, n0
andq m5, n0
movq $0xa7e5f24db74f5885, n1
andq m5, n1
movq $0x389cb27e0bc8d220, n2
andq m5, n2
subq n0, d
sbbq n1, m0
sbbq n2, m1
sbbq $0, m2
sbbq $0, m3
sbbq $0, m4
// Now shuffle registers up and loop
movq m4, m5
movq m3, m4
movq m2, m3
movq m1, m2
movq m0, m1
movq d, m0
decq k
jnz Lbignum_mod_n384_alt_loop
// Write back
Lbignum_mod_n384_alt_writeback:
movq m0, (z)
movq m1, 8(z)
movq m2, 16(z)
movq m3, 24(z)
movq m4, 32(z)
movq m5, 40(z)
// Restore registers and return
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
CFI_POP(%rbp)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_n384_alt)
Lbignum_mod_n384_alt_shortinput:
xorq m0, m0
xorq m1, m1
xorq m2, m2
xorq m3, m3
xorq m4, m4
xorq m5, m5
testq k, k
jz Lbignum_mod_n384_alt_writeback
movq (%rdx), m0
decq k
jz Lbignum_mod_n384_alt_writeback
movq 8(%rdx), m1
decq k
jz Lbignum_mod_n384_alt_writeback
movq 16(%rdx), m2
decq k
jz Lbignum_mod_n384_alt_writeback
movq 24(%rdx), m3
decq k
jz Lbignum_mod_n384_alt_writeback
movq 32(%rdx), m4
jmp Lbignum_mod_n384_alt_writeback
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
wlsfx/bnbb
| 5,671
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_deamont_p384_alt.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from almost-Montgomery form, z := (x / 2^384) mod p_384
// Input x[6]; output z[6]
//
// extern void bignum_deamont_p384_alt(uint64_t z[static 6],
// const uint64_t x[static 6]);
//
// Convert a 6-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 6-digit input will work, with no range restriction.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p384_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p384_alt)
.text
#define z %rdi
#define x %rsi
// Additional temps in the correction phase
#define u %rax
#define v %rcx
#define w %rdx
#define vshort %ecx
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing [d5;d4;d3;d2;d1] and re-using d0 as a
// temporary internally, as well as %rax, %rcx and %rdx.
// It is OK for d6 and d0 to be the same register (they often are)
//
// We want to add p_384 * w = (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
//
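// Why this multiplier works (an illustrative sketch): since
// p_384 == 2^32 - 1 (mod 2^64), the value
//
//   uint64_t w = d0 + (d0 << 32);   /* = d0 * (2^32 + 1) mod 2^64 */
//
// satisfies w * p_384 == d0 * (2^32 + 1) * (2^32 - 1)
// == d0 * (2^64 - 1) == -d0 (mod 2^64), so d0 + w * p_384 clears
// the bottom word and the whole value shifts down by one word.
//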
// montreds(d6,d5,d4,d3,d2,d1,d0)
#define montreds(d6,d5,d4,d3,d2,d1,d0) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
movq d0, %rcx ; \
shlq $32, %rcx ; \
addq d0, %rcx ; \
/* Construct [%rax;%rdx;d0;-] = (2^384 - p_384) * w */ \
/* We know the lowest word will cancel so we can re-use d0 */ \
/* and %rcx as temps. */ \
movq $0xffffffff00000001, %rax ; \
mulq %rcx; \
movq %rdx, d0 ; \
movq $0x00000000ffffffff, %rax ; \
mulq %rcx; \
addq %rax, d0 ; \
movl $0, %eax ; \
adcq %rcx, %rdx ; \
adcl %eax, %eax ; \
/* Now subtract that and add 2^384 * w */ \
subq d0, d1 ; \
sbbq %rdx, d2 ; \
sbbq %rax, d3 ; \
sbbq $0, d4 ; \
sbbq $0, d5 ; \
movq %rcx, d6 ; \
sbbq $0, d6
S2N_BN_SYMBOL(bignum_deamont_p384_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%r12)
CFI_PUSH(%r13)
// Set up an initial window [%r13,%r12,%r11,%r10,%r9,%r8] = x
movq (x), %r8
movq 8(x), %r9
movq 16(x), %r10
movq 24(x), %r11
movq 32(x), %r12
movq 40(x), %r13
// Montgomery reduce window 0
montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8)
// Montgomery reduce window 1
montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9)
// Montgomery reduce window 2
montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10)
// Montgomery reduce window 3
montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11)
// Montgomery reduce window 4
montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12)
// Montgomery reduce window 5
montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13)
// Do a test addition of dd = [%r13;%r12;%r11;%r10;%r9;%r8] and
// 2^384 - p_384 = [0;0;0;1;v;u], hence setting CF iff
// dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384.
movq $0xffffffff00000001, u
movl $0x00000000ffffffff, vshort
movq %r8, w
addq u, w
movq %r9, w
adcq v, w
movq %r10, w
adcq $1, w
movq %r11, w
adcq $0, w
movq %r12, w
adcq $0, w
movq %r13, w
adcq $0, w
// Convert CF to a bitmask in w
sbbq w, w
// Masked addition of 2^384 - p_384, hence subtraction of p_384
andq w, u
andq w, v
andq $1, w
addq u, %r8
adcq v, %r9
adcq w, %r10
adcq $0, %r11
adcq $0, %r12
adcq $0, %r13
// Write back the result
movq %r8, (z)
movq %r9, 8(z)
movq %r10, 16(z)
movq %r11, 24(z)
movq %r12, 32(z)
movq %r13, 40(z)
// Restore registers and return
CFI_POP(%r13)
CFI_POP(%r12)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_deamont_p384_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
wlsfx/bnbb
| 47,730
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjdouble.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjdouble(uint64_t p3[static 18],
// const uint64_t p1[static 18]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3 and %rsi = p1. The latter stays true
// throughout, but the montsqr macro below clobbers %rdi, so we
// save %rdi and restore it before the writes to the outputs.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z2 (NUMSIZE*0)(%rsp)
#define y2 (NUMSIZE*1)(%rsp)
#define x2p (NUMSIZE*2)(%rsp)
#define xy2 (NUMSIZE*3)(%rsp)
#define y4 (NUMSIZE*4)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define dx2 (NUMSIZE*5)(%rsp)
#define t1 (NUMSIZE*5)(%rsp)
#define d (NUMSIZE*6)(%rsp)
#define x4p (NUMSIZE*6)(%rsp)
// Safe place for pointer to the output
#define input_z (NUMSIZE*7)(%rsp)
#define NSPACE 344
// Corresponds exactly to bignum_montmul_p384
#define montmul_p384(P0,P1,P2) \
movq P2, %rdx ; \
xorl %r15d, %r15d ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rbx, %r10 ; \
addq %rbx, %r9 ; \
mulxq 0x10+P1, %rbx, %r11 ; \
adcq %rbx, %r10 ; \
mulxq 0x18+P1, %rbx, %r12 ; \
adcq %rbx, %r11 ; \
mulxq 0x20+P1, %rbx, %r13 ; \
adcq %rbx, %r12 ; \
mulxq 0x28+P1, %rbx, %r14 ; \
adcq %rbx, %r13 ; \
adcq %r15, %r14 ; \
movq %r8, %rdx ; \
shlq $0x20, %rdx ; \
addq %r8, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r8, %rbx ; \
adcq %r8, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq %rbp, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x8+P2, %rdx ; \
xorl %r8d, %r8d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
adoxq %r8, %r15 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r14 ; \
adcq %rbx, %r15 ; \
adcq %r8, %r8 ; \
movq %r9, %rdx ; \
shlq $0x20, %rdx ; \
addq %r9, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r9, %rbx ; \
adcq %r9, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r10 ; \
sbbq %rbx, %r11 ; \
sbbq %rbp, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P2, %rdx ; \
xorl %r9d, %r9d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
adoxq %r9, %r8 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r15 ; \
adcq %rbx, %r8 ; \
adcq %r9, %r9 ; \
movq %r10, %rdx ; \
shlq $0x20, %rdx ; \
addq %r10, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r10, %rbx ; \
adcq %r10, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r11 ; \
sbbq %rbx, %r12 ; \
sbbq %rbp, %r13 ; \
sbbq $0x0, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P2, %rdx ; \
xorl %r10d, %r10d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
adoxq %r10, %r9 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r8 ; \
adcq %rbx, %r9 ; \
adcq %r10, %r10 ; \
movq %r11, %rdx ; \
shlq $0x20, %rdx ; \
addq %r11, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r11, %rbx ; \
adcq %r11, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r12 ; \
sbbq %rbx, %r13 ; \
sbbq %rbp, %r14 ; \
sbbq $0x0, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P2, %rdx ; \
xorl %r11d, %r11d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
adoxq %r11, %r10 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r9 ; \
adcq %rbx, %r10 ; \
adcq %r11, %r11 ; \
movq %r12, %rdx ; \
shlq $0x20, %rdx ; \
addq %r12, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r12, %rbx ; \
adcq %r12, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r13 ; \
sbbq %rbx, %r14 ; \
sbbq %rbp, %r15 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P2, %rdx ; \
xorl %r12d, %r12d ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
adoxq %r12, %r11 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcq %rax, %r10 ; \
adcq %rbx, %r11 ; \
adcq %r12, %r12 ; \
movq %r13, %rdx ; \
shlq $0x20, %rdx ; \
addq %r13, %rdx ; \
xorl %ebp, %ebp ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rbx, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %r13, %rbx ; \
adcq %r13, %rax ; \
adcq %rdx, %rbx ; \
adcl %ebp, %ebp ; \
subq %rax, %r14 ; \
sbbq %rbx, %r15 ; \
sbbq %rbp, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %rdx ; \
addq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorl %edx, %edx ; \
xorl %ebp, %ebp ; \
xorl %r13d, %r13d ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %ebx ; \
adcq %r15, %rbx ; \
movl $0x1, %ecx ; \
adcq %r8, %rcx ; \
adcq %r9, %rdx ; \
adcq %r10, %rbp ; \
adcq %r11, %r13 ; \
adcq $0x0, %r12 ; \
cmovne %rax, %r14 ; \
cmovne %rbx, %r15 ; \
cmovne %rcx, %r8 ; \
cmovne %rdx, %r9 ; \
cmovne %rbp, %r10 ; \
cmovne %r13, %r11 ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %r8, 0x10+P0 ; \
movq %r9, 0x18+P0 ; \
movq %r10, 0x20+P0 ; \
movq %r11, 0x28+P0
// Corresponds exactly to bignum_montsqr_p384
#define montsqr_p384(P0,P1) \
movq P1, %rdx ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
mulxq 0x28+P1, %r13, %r14 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x20+P1, %r15, %rcx ; \
xorl %ebp, %ebp ; \
movq 0x10+P1, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x8+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
adcxq %rbp, %r15 ; \
adoxq %rbp, %rcx ; \
adcq %rbp, %rcx ; \
xorl %ebp, %ebp ; \
movq 0x20+P1, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x28+P1, %rax, %rdx ; \
adcxq %rax, %r15 ; \
adoxq %rdx, %rcx ; \
movq 0x28+P1, %rdx ; \
mulxq 0x20+P1, %rbx, %rbp ; \
mulxq 0x18+P1, %rax, %rdx ; \
adcxq %rax, %rcx ; \
adoxq %rdx, %rbx ; \
movl $0x0, %eax ; \
adcxq %rax, %rbx ; \
adoxq %rax, %rbp ; \
adcq %rax, %rbp ; \
xorq %rax, %rax ; \
movq P1, %rdx ; \
mulxq P1, %r8, %rax ; \
adcxq %r9, %r9 ; \
adoxq %rax, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %r15, %r15 ; \
adoxq %rdx, %r15 ; \
movq 0x20+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %rcx, %rcx ; \
adoxq %rax, %rcx ; \
adcxq %rbx, %rbx ; \
adoxq %rdx, %rbx ; \
movq 0x28+P1, %rdx ; \
mulxq %rdx, %rax, %rdi ; \
adcxq %rbp, %rbp ; \
adoxq %rax, %rbp ; \
movl $0x0, %eax ; \
adcxq %rax, %rdi ; \
adoxq %rax, %rdi ; \
movq %rbx, P0 ; \
movq %r8, %rdx ; \
shlq $0x20, %rdx ; \
addq %r8, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r8, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r8 ; \
addq %rbx, %rax ; \
adcq %rdx, %r8 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r9 ; \
sbbq %r8, %r10 ; \
sbbq %rbx, %r11 ; \
sbbq $0x0, %r12 ; \
sbbq $0x0, %r13 ; \
movq %rdx, %r8 ; \
sbbq $0x0, %r8 ; \
movq %r9, %rdx ; \
shlq $0x20, %rdx ; \
addq %r9, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r9, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r9 ; \
addq %rbx, %rax ; \
adcq %rdx, %r9 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r10 ; \
sbbq %r9, %r11 ; \
sbbq %rbx, %r12 ; \
sbbq $0x0, %r13 ; \
sbbq $0x0, %r8 ; \
movq %rdx, %r9 ; \
sbbq $0x0, %r9 ; \
movq %r10, %rdx ; \
shlq $0x20, %rdx ; \
addq %r10, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r10, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r10 ; \
addq %rbx, %rax ; \
adcq %rdx, %r10 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r11 ; \
sbbq %r10, %r12 ; \
sbbq %rbx, %r13 ; \
sbbq $0x0, %r8 ; \
sbbq $0x0, %r9 ; \
movq %rdx, %r10 ; \
sbbq $0x0, %r10 ; \
movq %r11, %rdx ; \
shlq $0x20, %rdx ; \
addq %r11, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r11, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r11 ; \
addq %rbx, %rax ; \
adcq %rdx, %r11 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r12 ; \
sbbq %r11, %r13 ; \
sbbq %rbx, %r8 ; \
sbbq $0x0, %r9 ; \
sbbq $0x0, %r10 ; \
movq %rdx, %r11 ; \
sbbq $0x0, %r11 ; \
movq %r12, %rdx ; \
shlq $0x20, %rdx ; \
addq %r12, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r12, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r12 ; \
addq %rbx, %rax ; \
adcq %rdx, %r12 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r13 ; \
sbbq %r12, %r8 ; \
sbbq %rbx, %r9 ; \
sbbq $0x0, %r10 ; \
sbbq $0x0, %r11 ; \
movq %rdx, %r12 ; \
sbbq $0x0, %r12 ; \
movq %r13, %rdx ; \
shlq $0x20, %rdx ; \
addq %r13, %rdx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %r13, %rax ; \
movl $0xffffffff, %ebx ; \
mulxq %rbx, %rbx, %r13 ; \
addq %rbx, %rax ; \
adcq %rdx, %r13 ; \
movl $0x0, %ebx ; \
adcq %rbx, %rbx ; \
subq %rax, %r8 ; \
sbbq %r13, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq $0x0, %r11 ; \
sbbq $0x0, %r12 ; \
movq %rdx, %r13 ; \
sbbq $0x0, %r13 ; \
movq P0, %rbx ; \
addq %r8, %r14 ; \
adcq %r9, %r15 ; \
adcq %r10, %rcx ; \
adcq %r11, %rbx ; \
adcq %r12, %rbp ; \
adcq %r13, %rdi ; \
movl $0x0, %r8d ; \
adcq %r8, %r8 ; \
xorq %r11, %r11 ; \
xorq %r12, %r12 ; \
xorq %r13, %r13 ; \
movq $0xffffffff00000001, %rax ; \
addq %r14, %rax ; \
movl $0xffffffff, %r9d ; \
adcq %r15, %r9 ; \
movl $0x1, %r10d ; \
adcq %rcx, %r10 ; \
adcq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq %rdi, %r13 ; \
adcq $0x0, %r8 ; \
cmovne %rax, %r14 ; \
cmovne %r9, %r15 ; \
cmovne %r10, %rcx ; \
cmovne %r11, %rbx ; \
cmovne %r12, %rbp ; \
cmovne %r13, %rdi ; \
movq %r14, P0 ; \
movq %r15, 0x8+P0 ; \
movq %rcx, 0x10+P0 ; \
movq %rbx, 0x18+P0 ; \
movq %rbp, 0x20+P0 ; \
movq %rdi, 0x28+P0
#define sub_p384(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
sbbq %rcx, %rcx ; \
movl $0xffffffff, %ebx ; \
andq %rbx, %rcx ; \
xorq %rbx, %rbx ; \
subq %rcx, %rbx ; \
subq %rbx, %rax ; \
movq %rax, P0 ; \
sbbq %rcx, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq %rax, %rax ; \
andq %rbx, %rcx ; \
negq %rax; \
sbbq %rcx, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0
// Simplified bignum_add_p384, without carry chain suspension
#define add_p384(P0,P1,P2) \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
adcq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
adcq 0x28+P2, %r11 ; \
movl $0x0, %edx ; \
adcq %rdx, %rdx ; \
movq $0xffffffff00000001, %rbp ; \
addq %rbp, %rax ; \
movl $0xffffffff, %ebp ; \
adcq %rbp, %rcx ; \
adcq $0x1, %r8 ; \
adcq $0x0, %r9 ; \
adcq $0x0, %r10 ; \
adcq $0x0, %r11 ; \
adcq $0xffffffffffffffff, %rdx ; \
movl $1, %ebx ; \
andq %rdx, %rbx ; \
andq %rbp, %rdx ; \
xorq %rbp, %rbp ; \
subq %rdx, %rbp ; \
subq %rbp, %rax ; \
movq %rax, P0 ; \
sbbq %rdx, %rcx ; \
movq %rcx, 0x8+P0 ; \
sbbq %rbx, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0
// P0 = 4 * P1 - P2
#define cmsub41_p384(P0,P1,P2) \
movq 40+P1, %rdx ; \
movq %rdx, %r13 ; \
shrq $62, %rdx ; \
movq 32+P1, %r12 ; \
shldq $2, %r12, %r13 ; \
movq 24+P1, %r11 ; \
shldq $2, %r11, %r12 ; \
movq 16+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
addq $1, %rdx ; \
subq P2, %r8 ; \
sbbq 0x8+P2, %r9 ; \
sbbq 0x10+P2, %r10 ; \
sbbq 0x18+P2, %r11 ; \
sbbq 0x20+P2, %r12 ; \
sbbq 0x28+P2, %r13 ; \
sbbq $0, %rdx ; \
xorq %rcx, %rcx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
movl $0xffffffff, %eax ; \
mulxq %rax, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
adcxq %rdx, %r10 ; \
movl $0x0, %eax ; \
movl $0x0, %ecx ; \
adoxq %rax, %rax ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
adcq %rcx, %r13 ; \
adcq %rcx, %rcx ; \
subq $0x1, %rcx ; \
movl $0xffffffff, %edx ; \
xorq %rax, %rax ; \
andq %rcx, %rdx ; \
subq %rdx, %rax ; \
andq $0x1, %rcx ; \
subq %rax, %r8 ; \
movq %r8, P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rcx, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0
// P0 = C * P1 - D * P2
#define cmsub_p384(P0,C,P1,D,P2) \
movq $0x00000000ffffffff, %r8 ; \
subq P2, %r8 ; \
movq $0xffffffff00000000, %r9 ; \
sbbq 8+P2, %r9 ; \
movq $0xfffffffffffffffe, %r10 ; \
sbbq 16+P2, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
sbbq 24+P2, %r11 ; \
movq $0xffffffffffffffff, %r12 ; \
sbbq 32+P2, %r12 ; \
movq $0xffffffffffffffff, %r13 ; \
sbbq 40+P2, %r13 ; \
movq $D, %rdx ; \
mulxq %r8, %r8, %rax ; \
mulxq %r9, %r9, %rcx ; \
addq %rax, %r9 ; \
mulxq %r10, %r10, %rax ; \
adcq %rcx, %r10 ; \
mulxq %r11, %r11, %rcx ; \
adcq %rax, %r11 ; \
mulxq %r12, %r12, %rax ; \
adcq %rcx, %r12 ; \
mulxq %r13, %r13, %r14 ; \
adcq %rax, %r13 ; \
adcq $1, %r14 ; \
xorl %ecx, %ecx ; \
movq $C, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 8+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 16+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 24+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 32+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 40+P1, %rax, %rdx ; \
adcxq %rax, %r13 ; \
adoxq %r14, %rdx ; \
adcxq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
movl $0xffffffff, %eax ; \
mulxq %rax, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
adcxq %rdx, %r10 ; \
movl $0x0, %eax ; \
movl $0x0, %ecx ; \
adoxq %rax, %rax ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
adcq %rcx, %r13 ; \
adcq %rcx, %rcx ; \
subq $0x1, %rcx ; \
movl $0xffffffff, %edx ; \
xorq %rax, %rax ; \
andq %rcx, %rdx ; \
subq %rdx, %rax ; \
andq $0x1, %rcx ; \
subq %rax, %r8 ; \
movq %r8, P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rcx, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0
// A weak version of add that only guarantees the sum fits in 6 digits;
// the result is congruent mod p_384 but not necessarily fully reduced
#define weakadd_p384(P0,P1,P2) \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
adcq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
adcq 0x28+P2, %r11 ; \
sbbq %rdx, %rdx ; \
movl $1, %ebx ; \
andq %rdx, %rbx ; \
movl $0xffffffff, %ebp ; \
andq %rbp, %rdx ; \
xorq %rbp, %rbp ; \
subq %rdx, %rbp ; \
addq %rbp, %rax ; \
movq %rax, P0 ; \
adcq %rdx, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %rbx, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
adcq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
adcq $0x0, %r11 ; \
movq %r11, 0x28+P0
// P0 = 3 * P1 - 8 * P2
#define cmsub38_p384(P0,P1,P2) \
movq $0x00000000ffffffff, %r8 ; \
subq P2, %r8 ; \
movq $0xffffffff00000000, %r9 ; \
sbbq 8+P2, %r9 ; \
movq $0xfffffffffffffffe, %r10 ; \
sbbq 16+P2, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
sbbq 24+P2, %r11 ; \
movq $0xffffffffffffffff, %r12 ; \
sbbq 32+P2, %r12 ; \
movq $0xffffffffffffffff, %r13 ; \
sbbq 40+P2, %r13 ; \
movq %r13, %r14 ; \
shrq $61, %r14 ; \
shldq $3, %r12, %r13 ; \
shldq $3, %r11, %r12 ; \
shldq $3, %r10, %r11 ; \
shldq $3, %r9, %r10 ; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
addq $1, %r14 ; \
xorl %ecx, %ecx ; \
movq $3, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 8+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 16+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 24+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 32+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 40+P1, %rax, %rdx ; \
adcxq %rax, %r13 ; \
adoxq %r14, %rdx ; \
adcxq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
movq $0xffffffff00000001, %rax ; \
mulxq %rax, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
movl $0xffffffff, %eax ; \
mulxq %rax, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
adcxq %rdx, %r10 ; \
movl $0x0, %eax ; \
movl $0x0, %ecx ; \
adoxq %rax, %rax ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
adcq %rcx, %r13 ; \
adcq %rcx, %rcx ; \
subq $0x1, %rcx ; \
movl $0xffffffff, %edx ; \
xorq %rax, %rax ; \
andq %rcx, %rdx ; \
subq %rdx, %rax ; \
andq $0x1, %rcx ; \
subq %rax, %r8 ; \
movq %r8, P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rcx, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0
S2N_BN_SYMBOL(p384_montjdouble):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables.
// Also save the output pointer %rdi, which gets clobbered by the
// intermediate operations before it is finally used for the writes.
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdi, input_z
// Main code, just a sequence of basic field operations
// z2 = z^2
// y2 = y^2
montsqr_p384(z2,z_1)
montsqr_p384(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
weakadd_p384(t1,x_1,z2)
sub_p384(t2,x_1,z2)
montmul_p384(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p384(t1,y_1,z_1)
montsqr_p384(x4p,x2p)
montmul_p384(xy2,x_1,y2)
// t2 = (y + z)^2
montsqr_p384(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p384(d,12,xy2,9,x4p)
sub_p384(t1,t2,z2)
// y4 = y^4
montsqr_p384(y4,y2)
// Restore the output pointer to write to x_3, y_3 and z_3.
movq input_z, %rdi
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p384(z_3,t1,y2)
montmul_p384(dx2,d,x2p)
// x' = 4 * xy2 - d
cmsub41_p384(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_p384(y_3,dx2,y4)
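// Collected at the field level, the sequence above amounts to the
// following sketch (not literal code; squarings and products are the
// Montgomery macros above, and +/- are modular):
//
//   z2 = z1^2;  y2 = y1^2;
//   x2p = (x1 + z2) * (x1 - z2);            /* x^2 - z^4   */
//   t1 = y1 + z1;  x4p = x2p^2;  xy2 = x1 * y2;
//   t2 = t1^2;                              /* (y + z)^2   */
//   d = 12 * xy2 - 9 * x4p;
//   t1 = t2 - z2;                           /* y^2 + 2*y*z */
//   y4 = y2^2;
//   z3 = t1 - y2;                           /* 2*y*z       */
//   dx2 = d * x2p;
//   x3 = 4 * xy2 - d;
//   y3 = 3 * dx2 - 8 * y4;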
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p384_montjdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
|
wlsfx/bnbb
| 5,046
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_sqr_p256k1_alt.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Square modulo p_256k1, z := (x^2) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_sqr_p256k1_alt(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p256k1_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_p256k1_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p256k1_alt)
.text
#define z %rdi
#define x %rsi
// Re-use input pointer later for constant
#define d %rsi
#define c %rcx
// Macro for the key "multiply and add to (c,h,l)" step, for square term
#define combadd1(c,h,l,numa) \
movq numa, %rax ; \
mulq %rax; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A short form where we don't expect a top carry
#define combads(h,l,numa) \
movq numa, %rax ; \
mulq %rax; \
addq %rax, l ; \
adcq %rdx, h
// A version doubling before adding, for non-square terms
#define combadd2(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0, c ; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
S2N_BN_SYMBOL(bignum_sqr_p256k1_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Result term 0
movq (x), %rax
mulq %rax
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
// Result term 1
xorq %r11, %r11
combadd2(%r11,%r10,%r9,(x),8(x))
// Result term 2
xorq %r12, %r12
combadd1(%r12,%r11,%r10,8(x))
combadd2(%r12,%r11,%r10,(x),16(x))
// Result term 3
xorq %r13, %r13
combadd2(%r13,%r12,%r11,(x),24(x))
combadd2(%r13,%r12,%r11,8(x),16(x))
// Result term 4
xorq %r14, %r14
combadd2(%r14,%r13,%r12,8(x),24(x))
combadd1(%r14,%r13,%r12,16(x))
// Result term 5
xorq %r15, %r15
combadd2(%r15,%r14,%r13,16(x),24(x))
// Result term 6
combads(%r15,%r14,24(x))
// Now we have the full 8-digit product 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// and this is == 4294968273 * h + l (mod p_256k1)
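// An illustrative sketch of the identity used here: since
// p_256k1 = 2^256 - 4294968273,
//
//   /* 2^256 == 4294968273 (mod p_256k1), so
//      2^256 * h + l == 4294968273 * h + l (mod p_256k1),
//      folding the 8-digit product into at most 5 digits */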
movq $4294968273, d
movq %r12, %rax
mulq d
addq %rax, %r8
adcq %rdx, %r9
sbbq c, c
movq %r13, %rax
mulq d
subq c, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq c, c
movq %r14, %rax
mulq d
subq c, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq c, c
movq %r15, %rax
mulq d
subq c, %rdx
xorq c, c
addq %rax, %r11
movq %rdx, %r12
adcq c, %r12
// Now we have reduced to 5 digits, 2^256 * h + l = [%r12,%r11,%r10,%r9,%r8]
// Use q = h + 1 as the initial quotient estimate, either right or 1 too big.
leaq 1(%r12), %rax
mulq d
addq %rax, %r8
adcq %rdx, %r9
adcq c, %r10
adcq c, %r11
// Now the effective answer is 2^256 * (CF - 1) + [%r11,%r10,%r9,%r8]
// So we correct if CF = 0 by subtracting 4294968273, i.e. by
// adding p_256k1 to the "full" answer
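// An illustrative model of this estimate-and-correct step, with
// K = 4294968273 = 2^256 - p_256k1 and quotient estimate q = h + 1:
//
//   /* v - q * p_256k1 == (v + q * K) - q * 2^256, so after adding
//      q * K and dropping the top word, CF = 1 means the estimate
//      was exact while CF = 0 means it was one too big, which the
//      code below fixes by subtracting K (== adding p_256k1) */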
sbbq %rax, %rax
notq %rax
andq d, %rax
subq %rax, %r8
sbbq c, %r9
sbbq c, %r10
sbbq c, %r11
// Write everything back
movq %r8, (z)
movq %r9, 8(z)
movq %r10, 16(z)
movq %r11, 24(z)
// Restore registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_sqr_p256k1_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
wlsfx/bnbb
| 37,504
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/secp256k1_jdouble_alt.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on SECG curve secp256k1 in Jacobian coordinates
//
// extern void secp256k1_jdouble_alt(uint64_t p3[static 12],
// const uint64_t p1[static 12]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input point are fully
// reduced mod p_256k1 and that the z coordinate is not zero.
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jdouble_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jdouble_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jdouble_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1, which is true when the
// arguments come in initially and is not disturbed throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define x_2 (NUMSIZE*0)(%rsp)
#define y_2 (NUMSIZE*1)(%rsp)
#define d (NUMSIZE*2)(%rsp)
#define tmp (NUMSIZE*3)(%rsp)
#define x_4 (NUMSIZE*4)(%rsp)
#define y_4 (NUMSIZE*6)(%rsp)
#define dx2 (NUMSIZE*8)(%rsp)
#define xy2 (NUMSIZE*10)(%rsp)
#define NSPACE NUMSIZE*12
// Corresponds to bignum_mul_p256k1_alt except %rsi -> %rbx
#define mul_p256k1(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rbx, %rax ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds to bignum_sqr_p256k1_alt except for %rsi -> %rbx
#define sqr_p256k1(P0,P1) \
movq P1, %rax ; \
mulq %rax; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r11 ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r12 ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r14 ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r15 ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rbx, %rax ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Rough versions producing 5-word results
#define roughmul_p256k1(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0 ; \
movq %r12, 0x20+P0
#define roughsqr_p256k1(P0,P1) \
movq P1, %rax ; \
mulq %rax; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r11 ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r12 ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r14 ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r15 ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0 ; \
movq %r12, 0x20+P0
// Weak doubling operation, staying in 4 digits but not in general
// fully normalizing
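// In C terms (an illustrative sketch): with c the top bit shifted
// out and K = 0x1000003d1 = 2^256 mod p_256k1,
//
//   /* result = (2 * P1 mod 2^256) + c * K == 2 * P1 (mod p_256k1) */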
#define weakdouble_p256k1(P0,P1) \
movq 24+P1, %r11 ; \
movq 16+P1, %r10 ; \
movq $0x1000003d1, %rax ; \
xorq %rdx, %rdx ; \
shldq $1, %r10, %r11 ; \
cmovncq %rdx, %rax ; \
movq 8+P1, %r9 ; \
shldq $1, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $1, %r8, %r9 ; \
shlq $1, %r8 ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rdx, %r10 ; \
adcq %rdx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 8+P0 ; \
movq %r10, 16+P0 ; \
movq %r11, 24+P0
// P0 = C * P1 - D * P2 with 5-word inputs P1 and P2
// Only used here with C = 12, D = 9, but could be used more generally.
// We actually compute C * P1 + D * (2^33 * p_256k1 - P2)
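// (Illustrative reasoning: the 5-word inputs produced by the rough
// operations above have a top digit of at most about 2^32, so
// P2 < 2^33 * p_256k1; hence 2^33 * p_256k1 - P2 is nonnegative and
// == -P2 (mod p_256k1), keeping the whole computation unsigned.)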
#define cmsub_p256k1(P0,C,P1,D,P2) \
movq $0xfffff85e00000000, %r9 ; \
subq P2, %r9 ; \
movq $0xfffffffffffffffd, %r10 ; \
sbbq 8+P2, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
sbbq 16+P2, %r11 ; \
movq $0xffffffffffffffff, %r12 ; \
sbbq 24+P2, %r12 ; \
movq $0x00000001ffffffff, %r13 ; \
sbbq 32+P2, %r13 ; \
movq $D, %rcx ; \
movq %r9, %rax ; \
mulq %rcx; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
movq %r10, %rax ; \
xorl %r10d, %r10d ; \
mulq %rcx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq %r11, %rax ; \
xorl %r11d, %r11d ; \
mulq %rcx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
movq %r12, %rax ; \
xorl %r12d, %r12d ; \
mulq %rcx; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
imulq %r13, %rcx ; \
addq %rcx, %r12 ; \
movq $C, %rcx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbx, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbx, %rbx ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbx, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
movq 0x20+P1, %rax ; \
mulq %rcx; \
addq %rax, %r12 ; \
leaq 0x1(%r12), %rax ; \
movq $0x1000003d1, %rcx ; \
mulq %rcx; \
xorl %ebx, %ebx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rbx, %r10 ; \
adcq %rbx, %r11 ; \
cmovbq %rbx, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rbx, %r9 ; \
movq %r9, 8+P0 ; \
sbbq %rbx, %r10 ; \
movq %r10, 16+P0 ; \
sbbq %rbx, %r11 ; \
movq %r11, 24+P0
// P0 = 3 * P1 - 8 * P2 with 5-digit P1 and P2
// We actually compute 3 * P1 + (2^33 * p_256k1 - P2) << 3
#define cmsub38_p256k1(P0,P1,P2) \
movq $0xfffff85e00000000, %r8 ; \
subq P2, %r8 ; \
movq $0xfffffffffffffffd, %r9 ; \
sbbq 8+P2, %r9 ; \
movq $0xffffffffffffffff, %r10 ; \
sbbq 16+P2, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
sbbq 24+P2, %r11 ; \
movq $0x00000001ffffffff, %r12 ; \
sbbq 32+P2, %r12 ; \
shldq $3, %r11, %r12 ; \
shldq $3, %r10, %r11 ; \
shldq $3, %r9, %r10 ; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
movl $3, %ecx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbx, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbx, %rbx ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbx, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
movq 0x20+P1, %rax ; \
mulq %rcx; \
addq %rax, %r12 ; \
leaq 0x1(%r12), %rax ; \
movq $0x1000003d1, %rcx ; \
mulq %rcx; \
xorl %ebx, %ebx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rbx, %r10 ; \
adcq %rbx, %r11 ; \
cmovbq %rbx, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rbx, %r9 ; \
movq %r9, 8+P0 ; \
sbbq %rbx, %r10 ; \
movq %r10, 16+P0 ; \
sbbq %rbx, %r11 ; \
        movq    %r11, 24+P0
// P0 = 4 * P1 - P2 with 5-digit P1, 4-digit P2 and result.
// This is done by direct subtraction of P2 since the method
// in bignum_cmul_p256k1 etc. for quotient estimation still
// works when the value to be reduced is negative, as
// long as it is > -p_256k1, which is the case here.
#define cmsub41_p256k1(P0,P1,P2) \
movq 32+P1, %r12 ; \
movq 24+P1, %r11 ; \
shldq $2, %r11, %r12 ; \
movq 16+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
subq P2, %r8 ; \
sbbq 8+P2, %r9 ; \
sbbq 16+P2, %r10 ; \
sbbq 24+P2, %r11 ; \
sbbq $0, %r12 ; \
leaq 0x1(%r12), %rax ; \
movq $0x1000003d1, %rcx ; \
mulq %rcx; \
xorq %rbx, %rbx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
adcq $0x0, %r11 ; \
cmovbq %rbx, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rbx, %r9 ; \
movq %r9, 8+P0 ; \
sbbq %rbx, %r10 ; \
movq %r10, 16+P0 ; \
sbbq %rbx, %r11 ; \
        movq    %r11, 24+P0
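// All three cmsub macros above end with the same 5-to-4 digit reduction:
// viewing the value as 2^256 * h + l, the quotient estimate q = h + 1 gives
// z' = (2^256 * h + l) - q * p_256k1 = -2^256 + (l + 0x1000003d1 * q), so a
// final carry means z' >= 0 and otherwise adding back p_256k1 corrects it.
// A hedged C model of that step (illustrative name; note h + 1 wraps to 0
// for a "negative" h of all ones, just like the leaq above, which is what
// makes the slightly negative case in cmsub41 come out right):
//
//   #include <stdint.h>
//   static void fold5_p256k1(uint64_t z[4], const uint64_t l[4], uint64_t h)
//   {
//       const uint64_t c = 0x1000003d1u;          // 2^256 - p_256k1
//       unsigned __int128 q = (unsigned __int128)(uint64_t)(h + 1) * c;
//       unsigned __int128 t = (unsigned __int128)l[0] + (uint64_t)q;
//       uint64_t d0 = (uint64_t)t;
//       t = (t >> 64) + l[1] + (uint64_t)(q >> 64);
//       uint64_t d1 = (uint64_t)t;
//       t = (t >> 64) + l[2]; uint64_t d2 = (uint64_t)t;
//       t = (t >> 64) + l[3]; uint64_t d3 = (uint64_t)t;
//       uint64_t corr = (t >> 64) ? 0 : c;        // no carry: q one too big
//       uint64_t b1 = d0 < corr; z[0] = d0 - corr;
//       uint64_t b2 = d1 < b1;   z[1] = d1 - b1;
//       uint64_t b3 = d2 < b2;   z[2] = d2 - b2;
//       z[3] = d3 - b3;
//   }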
S2N_BN_SYMBOL(secp256k1_jdouble_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Main sequence of operations
// y_2 = y^2
sqr_p256k1(y_2,y_1)
// x_2 = x^2
sqr_p256k1(x_2,x_1)
// tmp = 2 * y_1 (in 4 words but not fully normalized)
weakdouble_p256k1(tmp,y_1)
// xy2 = x * y^2 (5-digit partially reduced)
// x_4 = x^4 (5-digit partially reduced)
roughmul_p256k1(xy2,x_1,y_2)
roughsqr_p256k1(x_4,x_2)
// z_3 = 2 * y_1 * z_1
mul_p256k1(z_3,z_1,tmp)
// d = 12 * xy2 - 9 * x_4
cmsub_p256k1(d,12,xy2,9,x_4)
// y4 = y2^2 (5-digit partially reduced)
roughsqr_p256k1(y_4,y_2)
// dx2 = d * x_2 (5-digit partially reduced)
roughmul_p256k1(dx2,x_2,d)
// x_3 = 4 * xy2 - d
cmsub41_p256k1(x_3,xy2,d)
// y_3 = 3 * dx2 - 8 * y_4
cmsub38_p256k1(y_3,dx2,y_4)
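// In standard terms this is the usual a = 0 Jacobian doubling: with
// A = x_1^2 and B = y_1^2 (all mod p_256k1), the sequence computes
//
//   d   = 12 * x_1 * B - 9 * A^2
//   x_3 = 4 * x_1 * B - d          (= 9 * A^2 - 8 * x_1 * B)
//   y_3 = 3 * d * A - 8 * B^2
//   z_3 = 2 * y_1 * z_1
//
// which agrees with the common Jacobian doubling formulas after substitution.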
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(secp256k1_jdouble_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_montsqr_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^256) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_montsqr_p256k1_alt(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Does z := (x^2 / 2^256) mod p_256k1, assuming x^2 <= 2^256 * p_256k1, which
// is guaranteed in particular if x < p_256k1 initially (the "intended" case).
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p256k1_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p256k1_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p256k1_alt)
.text
#define z %rdi
#define x %rsi
// Re-used for constants in second part
#define w %rsi
// Macro for the key "multiply and add to (c,h,l)" step, for square term
#define combadd1(c,h,l,numa) \
movq numa, %rax ; \
mulq %rax; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A short form where we don't expect a top carry
#define combads(h,l,numa) \
movq numa, %rax ; \
mulq %rax; \
addq %rax, l ; \
adcq %rdx, h
// A version doubling before adding, for non-square terms
#define combadd2(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0, c ; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
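// A hedged C model of combadd2, the doubling variant (combadd1 and combads
// have the same shape without the doubling or without the top carry):
//
//   #include <stdint.h>
//   static void combadd2_sketch(uint64_t *c, uint64_t *h, uint64_t *l,
//                               uint64_t numa, uint64_t numb)
//   {
//       unsigned __int128 p = (unsigned __int128)numa * numb;
//       uint64_t lo = (uint64_t)p, hi = (uint64_t)(p >> 64);
//       uint64_t dbl = hi >> 63;                 // carry out of the doubling
//       hi = (hi << 1) | (lo >> 63);
//       lo <<= 1;
//       unsigned __int128 t = (unsigned __int128)*l + lo;
//       *l = (uint64_t)t;
//       t = (t >> 64) + *h + hi;
//       *h = (uint64_t)t;
//       *c += dbl + (uint64_t)(t >> 64);
//   }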
S2N_BN_SYMBOL(bignum_montsqr_p256k1_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Result term 0
movq (x), %rax
mulq %rax
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
// Result term 1
xorq %r11, %r11
combadd2(%r11,%r10,%r9,(x),8(x))
// Result term 2
xorq %r12, %r12
combadd1(%r12,%r11,%r10,8(x))
combadd2(%r12,%r11,%r10,(x),16(x))
// Result term 3
xorq %r13, %r13
combadd2(%r13,%r12,%r11,(x),24(x))
combadd2(%r13,%r12,%r11,8(x),16(x))
// Result term 4
xorq %r14, %r14
combadd2(%r14,%r13,%r12,8(x),24(x))
combadd1(%r14,%r13,%r12,16(x))
// Result term 5
xorq %r15, %r15
combadd2(%r15,%r14,%r13,16(x),24(x))
// Result term 6
combads(%r15,%r14,24(x))
// Now we have the full 8-digit product 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// Do Montgomery reductions, now using %rcx as a carry-saver.
movq $0xd838091dd2253531, w
movq $4294968273, %rbx
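// Each reduction row adds a multiple m * p_256k1 chosen to zero the lowest
// word: with p_256k1 = 2^256 - c, c = 4294968273, and w = c^-1 mod 2^64
// (equivalently -(p_256k1^-1) mod 2^64, since p_256k1 == -c mod 2^64),
// taking m = x0 * w makes the low word of m * c equal x0 exactly. So only
// the high word of m * c is subtracted from the next digit, while m itself
// is accumulated at the 2^256 position by the addition block further down.
// A hedged C model of one row (borrow handling left to the caller):
//
//   #include <stdint.h>
//   static uint64_t montgomery_row_sketch(uint64_t *x1, uint64_t x0)
//   {
//       const uint64_t c = 0x1000003d1u;
//       const uint64_t w = 0xd838091dd2253531u;  // c^-1 mod 2^64
//       uint64_t m = x0 * w;                     // low 64 bits of m*c == x0
//       unsigned __int128 mc = (unsigned __int128)m * c;
//       *x1 -= (uint64_t)(mc >> 64);             // may borrow into next digit
//       return m;                                // added back at bit 256
//   }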
// Montgomery reduce row 0
movq %rbx, %rax
imulq w, %r8
mulq %r8
subq %rdx, %r9
sbbq %rcx, %rcx
// Montgomery reduce row 1
movq %rbx, %rax
imulq w, %r9
mulq %r9
negq %rcx
sbbq %rdx, %r10
sbbq %rcx, %rcx
// Montgomery reduce row 2
movq %rbx, %rax
imulq w, %r10
mulq %r10
negq %rcx
sbbq %rdx, %r11
sbbq %rcx, %rcx
// Montgomery reduce row 3
movq %rbx, %rax
imulq w, %r11
mulq %r11
negq %rcx
// Now [%r15,%r14,%r13,%r12] := [%r15,%r14,%r13,%r12] + [%r11,%r10,%r9,%r8] - (%rdx + CF)
sbbq %rdx, %r8
sbbq $0, %r9
sbbq $0, %r10
sbbq $0, %r11
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
sbbq w, w
// Let b be the top carry captured just above as w = (2^64-1) * b
// Now if [b,%r15,%r14,%r13,%r12] >= p_256k1, subtract p_256k1, i.e. add 4294968273
// and either way throw away the top word. [b,%r15,%r14,%r13,%r12] - p_256k1 =
// [(b - 1),%r15,%r14,%r13,%r12] + 4294968273. If [%r15,%r14,%r13,%r12] + 4294968273
// gives carry flag CF then >= comparison is top = 0 <=> b - 1 + CF = 0 which
// is equivalent to b \/ CF, and so to (2^64-1) * b + (2^64 - 1) + CF >= 2^64
movq %r12, %r8
addq %rbx, %r8
movq %r13, %r9
adcq $0, %r9
movq %r14, %r10
adcq $0, %r10
movq %r15, %r11
adcq $0, %r11
adcq $-1, w
// Write everything back
cmovcq %r8, %r12
movq %r12, (z)
cmovcq %r9, %r13
movq %r13, 8(z)
cmovcq %r10, %r14
movq %r14, 16(z)
cmovcq %r11, %r15
movq %r15, 24(z)
// Restore registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p256k1_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_cmul_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_256k1, z := (c * x) mod p_256k1, assuming
// x reduced
// Inputs c, x[4]; output z[4]
//
// extern void bignum_cmul_p256k1(uint64_t z[static 4], uint64_t c,
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------
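// Hedged usage example (little-endian 4-word arrays, as in the prototype):
//
//   uint64_t x[4] = {5, 0, 0, 0}, z[4];
//   bignum_cmul_p256k1(z, 3, x);    // z = 15, already fully reduced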
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p256k1)
.text
#define z %rdi
// Temporarily moved here for initial multiply
#define x %rcx
#define c %rcx
// Likewise this is thrown away after initial multiply
#define d %rdx
#define h %rdx
#define a %rax
#define ashort %eax
#define q %rax
#define d0 %rsi
#define d1 %r8
#define d2 %r9
#define d3 %r10
S2N_BN_SYMBOL(bignum_cmul_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Shuffle inputs (since we want multiplier in %rdx)
movq %rdx, x
movq %rsi, d
// Multiply, accumulating the result as 2^256 * h + [d3;d2;d1;d0]
mulxq (x), d0, d1
mulxq 8(x), a, d2
addq a, d1
mulxq 16(x), a, d3
adcq a, d2
mulxq 24(x), a, h
adcq a, d3
adcq $0, h
// Now the quotient estimate is q = h + 1, and then we do the reduction,
// writing z = [d3;d2;d1;d0], as z' = (2^256 * h + z) - q * p_256k1 =
// (2^256 * h + z) - q * (2^256 - 4294968273) = -2^256 + (z + 4294968273 * q)
leaq 1(h), q
movq $4294968273, c
mulq c
addq %rax, d0
adcq %rdx, d1
adcq $0, d2
adcq $0, d3
// Because of the implicit -2^256, CF means >= 0 so z' is the answer; ~CF
// means z' < 0 so we add p_256k1, which in 4 digits means subtracting c.
movq $0, a
cmovcq a, c
subq c, d0
movq d0, (z)
sbbq a, d1
movq d1, 8(z)
sbbq a, d2
movq d2, 16(z)
sbbq a, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_half_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Halve modulo p_256k1, z := (x / 2) mod p_256k1, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_half_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_half_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_p256k1)
.text
#define z %rdi
#define x %rsi
#define d0 %rcx
#define d1 %rdx
#define d2 %r8
#define d3 %r9
#define c %rax
S2N_BN_SYMBOL(bignum_half_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the 4 digits of x and, letting b be the LSB of x (i.e. whether x is
// odd), construct the constant c = 4294968273 * b
movq (x), d0
movq $4294968273, c
movq 8(x), d1
movq $1, d3
andq d0, d3
movq 16(x), d2
cmovzq d3, c
movq 24(x), d3
// We want (x + b * p_256k1) / 2 where b is that LSB, in {0,1}.
// That amounts to (2^256 * b + x - 4294968273 * b) / 2, and
// modulo 4 words that's the same as ([2^256 * c + x] - c) / 2.
// So do that subtraction and shift a place right as we go.
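// A hedged C sketch of the same computation (illustrative name; the low bit
// of d0 is zero by construction, so the right shift loses nothing):
//
//   #include <stdint.h>
//   static void half_p256k1_sketch(uint64_t z[4], const uint64_t x[4])
//   {
//       uint64_t b = x[0] & 1;                  // is x odd?
//       uint64_t c = b ? 0x1000003d1u : 0;      // b * (2^256 - p_256k1)
//       uint64_t d0 = x[0] - c;  uint64_t w1 = x[0] < c;
//       uint64_t d1 = x[1] - w1; uint64_t w2 = x[1] < w1;
//       uint64_t d2 = x[2] - w2; uint64_t w3 = x[2] < w2;
//       uint64_t d3 = x[3] - w3; uint64_t w4 = x[3] < w3;
//       uint64_t top = b - w4;                  // top word of 2^256*b + x - c
//       z[0] = (d0 >> 1) | (d1 << 63);
//       z[1] = (d1 >> 1) | (d2 << 63);
//       z[2] = (d2 >> 1) | (d3 << 63);
//       z[3] = (d3 >> 1) | (top << 63);
//   }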
subq c, d0
sbbq $0, d1
sbbq $0, d2
sbbq $0, d3
sbbq $0, c
// Shift right, pushing the carry back down, and store back
shrdq $1, d1, d0
movq d0, (z)
shrdq $1, d2, d1
movq d1, 8(z)
shrdq $1, d3, d2
movq d2, 16(z)
shrdq $1, c, d3
movq d3, 24(z)
// Return
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_half_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/secp256k1_jmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on SECG curve secp256k1 in Jacobian coordinates
//
// extern void secp256k1_jmixadd(uint64_t p3[static 12],
// const uint64_t p1[static 12],
// const uint64_t p2[static 8]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity. It is assumed that
// all the coordinates of the input points p1 and p2 are fully reduced
// mod p_256k1, that the z coordinate of p1 is nonzero and that neither
// p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents the same affine
// point as".
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jmixadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jmixadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jmixadd)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// all of which are maintained throughout the code.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define z_2 (2*NUMSIZE)(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define zp2 (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define NSPACE NUMSIZE*6
// Corresponds exactly to bignum_mul_p256k1
#define mul_p256k1(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rax, %r10 ; \
addq %rax, %r9 ; \
mulxq 0x10+P1, %rax, %r11 ; \
adcq %rax, %r10 ; \
mulxq 0x18+P1, %rax, %r12 ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x8+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rcx, %r13 ; \
xorl %ecx, %ecx ; \
movq 0x10+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq 0x18+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcxq %rcx, %r15 ; \
movabs $0x1000003d1, %rdx ; \
xorl %ecx, %ecx ; \
mulxq %r12, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq %r13, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq %r14, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
adcxq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulxq %rax, %rax, %rbx ; \
addq %rax, %r8 ; \
adcq %rbx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
cmovbq %rcx, %rdx ; \
subq %rdx, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds exactly to bignum_sqr_p256k1
#define sqr_p256k1(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ebx, %ebx ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rbx, %r13 ; \
adoxq %rbx, %r14 ; \
adcq %rbx, %r14 ; \
xorl %ebx, %ebx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rbx, %r15 ; \
adoxq %rbx, %r15 ; \
movabs $0x1000003d1, %rdx ; \
xorl %ebx, %ebx ; \
mulxq %r12, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq %r13, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq %r14, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
adcxq %rbx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulxq %rax, %rax, %rcx ; \
addq %rax, %r8 ; \
adcq %rcx, %r9 ; \
adcq %rbx, %r10 ; \
adcq %rbx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rdx, %rax ; \
subq %rax, %r8 ; \
sbbq %rbx, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq %rbx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds exactly to bignum_sub_p256k1
#define sub_p256k1(P0,P1,P2) \
xorl %eax, %eax ; \
movq P1, %r8 ; \
subq P2, %r8 ; \
movq 0x8+P1, %r9 ; \
sbbq 0x8+P2, %r9 ; \
movq 0x10+P1, %r10 ; \
sbbq 0x10+P2, %r10 ; \
movq 0x18+P1, %r11 ; \
sbbq 0x18+P2, %r11 ; \
movabs $0x1000003d1, %rcx ; \
cmovae %rax, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rax, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq %rax, %r11 ; \
movq %r11, 0x18+P0
// Additional macros to help with final multiplexing
#define testzero4(P) \
movq P, %rax ; \
movq 8+P, %rdx ; \
orq 16+P, %rax ; \
orq 24+P, %rdx ; \
orq %rdx, %rax
#define mux4(r0,r1,r2,r3,PNE,PEQ) \
movq PNE, r0 ; \
movq PEQ, %rax ; \
cmovzq %rax, r0 ; \
movq 8+PNE, r1 ; \
movq 8+PEQ, %rax ; \
cmovzq %rax, r1 ; \
movq 16+PNE, r2 ; \
movq 16+PEQ, %rax ; \
cmovzq %rax, r2 ; \
movq 24+PNE, r3 ; \
movq 24+PEQ, %rax ; \
cmovzq %rax, r3
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
S2N_BN_SYMBOL(secp256k1_jmixadd):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input y in %rbp where it stays
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
sqr_p256k1(zp2,z_1)
mul_p256k1(y2a,z_1,y_2)
mul_p256k1(x2a,zp2,x_2)
mul_p256k1(y2a,zp2,y2a)
sub_p256k1(xd,x2a,x_1)
sub_p256k1(yd,y2a,y_1)
sqr_p256k1(zz,xd)
sqr_p256k1(ww,yd)
mul_p256k1(zzx1,zz,x_1)
mul_p256k1(zzx2,zz,x2a)
sub_p256k1(resx,ww,zzx1)
sub_p256k1(t1,zzx2,zzx1)
mul_p256k1(resz,xd,z_1)
sub_p256k1(resx,resx,zzx2)
sub_p256k1(t2,zzx1,resx)
mul_p256k1(t1,t1,y_1)
mul_p256k1(t2,yd,t2)
sub_p256k1(resy,t2,t1)
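// In the standard mixed-addition notation (z_2 = 1 implicitly, so U1 = x_1
// and S1 = y_1), the sequence above computes
//
//   U2 = x_2 * z_1^2,  S2 = y_2 * z_1^3,  H = U2 - U1,  r = S2 - S1
//   x_3 = r^2 - H^2 * (U1 + U2)
//   y_3 = r * (U1 * H^2 - x_3) - S1 * H^3
//   z_3 = H * z_1
//
// matching the usual Jacobian mixed-addition formulas.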
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
testzero4(z_1)
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with an extra z = 1
// coordinate, hence giving 0 + p2 = p2 for the final result.
mux4(%r8,%r9,%r10,%r11,resx,x_2)
mux4(%r12,%r13,%r14,%r15,resy,y_2)
store4(x_3,%r8,%r9,%r10,%r11)
store4(y_3,%r12,%r13,%r14,%r15)
load4(%r8,%r9,%r10,%r11,resz)
movl $1, %eax
cmovzq %rax, %r8
movl $0, %eax
cmovzq %rax, %r9
cmovzq %rax, %r10
cmovzq %rax, %r11
store4(z_3,%r8,%r9,%r10,%r11)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(secp256k1_jmixadd)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/secp256k1_jmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on SECG curve secp256k1 in Jacobian coordinates
//
// extern void secp256k1_jmixadd_alt(uint64_t p3[static 12],
// const uint64_t p1[static 12],
// const uint64_t p2[static 8]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity. It is assumed that
// all the coordinates of the input points p1 and p2 are fully reduced
// mod p_256k1, that the z coordinate of p1 is nonzero and that neither
// p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents the same affine
// point as".
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jmixadd_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jmixadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jmixadd_alt)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// all of which are maintained throughout the code.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define z_2 (2*NUMSIZE)(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define zp2 (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define NSPACE NUMSIZE*6
// Corresponds to bignum_mul_p256k1_alt except %rsi -> %rbx
#define mul_p256k1(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rbx, %rax ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds to bignum_sqr_p256k1_alt except for %rsi -> %rbx
#define sqr_p256k1(P0,P1) \
movq P1, %rax ; \
mulq %rax; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r11 ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r12 ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r14 ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r15 ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rbx, %rax ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds exactly to bignum_sub_p256k1
#define sub_p256k1(P0,P1,P2) \
xorl %eax, %eax ; \
movq P1, %r8 ; \
subq P2, %r8 ; \
movq 0x8+P1, %r9 ; \
sbbq 0x8+P2, %r9 ; \
movq 0x10+P1, %r10 ; \
sbbq 0x10+P2, %r10 ; \
movq 0x18+P1, %r11 ; \
sbbq 0x18+P2, %r11 ; \
movabs $0x1000003d1, %rcx ; \
cmovae %rax, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rax, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq %rax, %r11 ; \
movq %r11, 0x18+P0
// Additional macros to help with final multiplexing
#define testzero4(P) \
movq P, %rax ; \
movq 8+P, %rdx ; \
orq 16+P, %rax ; \
orq 24+P, %rdx ; \
orq %rdx, %rax
#define mux4(r0,r1,r2,r3,PNE,PEQ) \
movq PNE, r0 ; \
movq PEQ, %rax ; \
cmovzq %rax, r0 ; \
movq 8+PNE, r1 ; \
movq 8+PEQ, %rax ; \
cmovzq %rax, r1 ; \
movq 16+PNE, r2 ; \
movq 16+PEQ, %rax ; \
cmovzq %rax, r2 ; \
movq 24+PNE, r3 ; \
movq 24+PEQ, %rax ; \
cmovzq %rax, r3
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
S2N_BN_SYMBOL(secp256k1_jmixadd_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input y in %rbp where it stays
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
sqr_p256k1(zp2,z_1)
mul_p256k1(y2a,z_1,y_2)
mul_p256k1(x2a,zp2,x_2)
mul_p256k1(y2a,zp2,y2a)
sub_p256k1(xd,x2a,x_1)
sub_p256k1(yd,y2a,y_1)
sqr_p256k1(zz,xd)
sqr_p256k1(ww,yd)
mul_p256k1(zzx1,zz,x_1)
mul_p256k1(zzx2,zz,x2a)
sub_p256k1(resx,ww,zzx1)
sub_p256k1(t1,zzx2,zzx1)
mul_p256k1(resz,xd,z_1)
sub_p256k1(resx,resx,zzx2)
sub_p256k1(t2,zzx1,resx)
mul_p256k1(t1,t1,y_1)
mul_p256k1(t2,yd,t2)
sub_p256k1(resy,t2,t1)
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
testzero4(z_1)
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with an extra z = 1
// coordinate, hence giving 0 + p2 = p2 for the final result.
mux4(%r8,%r9,%r10,%r11,resx,x_2)
mux4(%r12,%r13,%r14,%r15,resy,y_2)
store4(x_3,%r8,%r9,%r10,%r11)
store4(y_3,%r12,%r13,%r14,%r15)
load4(%r8,%r9,%r10,%r11,resz)
movl $1, %eax
cmovzq %rax, %r8
movl $0, %eax
cmovzq %rax, %r9
cmovzq %rax, %r10
cmovzq %rax, %r11
store4(z_3,%r8,%r9,%r10,%r11)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(secp256k1_jmixadd_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_montsqr_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^256) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_montsqr_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Does z := (x^2 / 2^256) mod p_256k1, assuming x^2 <= 2^256 * p_256k1, which
// is guaranteed in particular if x < p_256k1 initially (the "intended" case).
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p256k1)
.text
#define z %rdi
#define x %rsi
// Use this fairly consistently for a zero
#define zero %rbp
#define zeroe %ebp
// Also use the same register for multiplicative inverse in Montgomery stage
#define w %rbp
// Add %rdx * m into a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rbx as temporaries
#define mulpadd(high,low,m) \
mulxq m, %rax, %rbx ; \
adcxq %rax, low ; \
adoxq %rbx, high
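// The adcxq/adoxq pair maintains two independent carry chains (CF and OF),
// so successive mulpadd steps can overlap their carries. A hedged C model of
// the arithmetic of one step, with the two chains merged into one (names
// here are illustrative):
//
//   #include <stdint.h>
//   static void mulpadd_sketch(uint64_t *high, uint64_t *low,
//                              uint64_t m, uint64_t d /* plays %rdx */)
//   {
//       unsigned __int128 p = (unsigned __int128)m * d;
//       unsigned __int128 t = (unsigned __int128)*low + (uint64_t)p;
//       *low = (uint64_t)t;
//       *high += (uint64_t)(p >> 64) + (uint64_t)(t >> 64);
//       // In the assembly, a carry out of *high propagates via the second
//       // flag chain into the next window register rather than being lost.
//   }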
S2N_BN_SYMBOL(bignum_montsqr_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Compute [%r15;%r8] = [00] which we use later, but mainly
// set up an initial window [%r14;...;%r9] = [23;03;01]
movq (x), %rdx
mulxq %rdx, %r8, %r15
mulxq 8(x), %r9, %r10
mulxq 24(x), %r11, %r12
movq 16(x), %rdx
mulxq 24(x), %r13, %r14
// Clear our zero register, and also initialize the flags for the carry chain
xorl zeroe, zeroe
// Chain in the addition of 02 + 12 + 13 to that window (no carry-out possible)
// This gives all the "heterogeneous" terms of the squaring ready to double
mulpadd(%r11,%r10,(x))
mulpadd(%r12,%r11,8(x))
movq 24(x), %rdx
mulpadd(%r13,%r12,8(x))
adcxq zero, %r13
adoxq zero, %r14
adcq zero, %r14
// Double and add to the 00 + 11 + 22 + 33 terms
xorl zeroe, zeroe
adcxq %r9, %r9
adoxq %r15, %r9
movq 8(x), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 16(x), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 24(x), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq zero, %r15
adoxq zero, %r15
// Now we have the full 8-digit square 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// Do Montgomery reductions, now using %rcx as a carry save
movq $0xd838091dd2253531, w
movq $4294968273, %rbx
// Montgomery reduce row 0
movq %rbx, %rax
imulq w, %r8
mulq %r8
subq %rdx, %r9
sbbq %rcx, %rcx
// Montgomery reduce row 1
movq %rbx, %rax
imulq w, %r9
mulq %r9
negq %rcx
sbbq %rdx, %r10
sbbq %rcx, %rcx
// Montgomery reduce row 2
movq %rbx, %rax
imulq w, %r10
mulq %r10
negq %rcx
sbbq %rdx, %r11
sbbq %rcx, %rcx
// Montgomery reduce row 3
movq %rbx, %rax
imulq w, %r11
mulq %r11
negq %rcx
// Now [%r15,%r14,%r13,%r12] := [%r15,%r14,%r13,%r12] + [%r11,%r10,%r9,%r8] - (%rdx + CF)
sbbq %rdx, %r8
sbbq $0, %r9
sbbq $0, %r10
sbbq $0, %r11
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
sbbq w, w
// Let b be the top carry captured just above as w = (2^64-1) * b
// Now if [b,%r15,%r14,%r13,%r12] >= p_256k1, subtract p_256k1, i.e. add 4294968273
// and either way throw away the top word. [b,%r15,%r14,%r13,%r12] - p_256k1 =
// [(b - 1),%r15,%r14,%r13,%r12] + 4294968273. If [%r15,%r14,%r13,%r12] + 4294968273
// gives carry flag CF then >= comparison is top = 0 <=> b - 1 + CF = 0 which
// is equivalent to b \/ CF, and so to (2^64-1) * b + (2^64 - 1) + CF >= 2^64
movq %r12, %r8
addq %rbx, %r8
movq %r13, %r9
adcq $0, %r9
movq %r14, %r10
adcq $0, %r10
movq %r15, %r11
adcq $0, %r11
adcq $-1, w
// Write everything back
cmovcq %r8, %r12
movq %r12, (z)
cmovcq %r9, %r13
movq %r13, 8(z)
cmovcq %r10, %r14
movq %r14, 16(z)
cmovcq %r11, %r15
movq %r15, 24(z)
// Restore saved registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_mod_n256k1_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_256k1
// Input x[4]; output z[4]
//
// extern void bignum_mod_n256k1_4(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Reduction is modulo the group order of the secp256k1 curve.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n256k1_4)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n256k1_4)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n256k1_4)
.text
#define z %rdi
#define x %rsi
#define d0 %rdx
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define n0 %rax
#define n1 %r10
#define n2 %r11
#define n2short %r11d
// Can re-use this as a temporary once we've loaded the input
#define c %rsi
S2N_BN_SYMBOL(bignum_mod_n256k1_4):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load a set of registers [0; n2; n1; n0] = 2^256 - n_256k1
movq $0x402da1732fc9bebf, n0
movq $0x4551231950b75fc4, n1
movl $1, n2short
// Load the input and compute x + (2^256 - n_256k1)
movq (x), d0
addq n0, d0
movq 8(x), d1
adcq n1, d1
movq 16(x), d2
adcq n2, d2
movq 24(x), d3
adcq $0, d3
// Now CF is set iff 2^256 <= x + (2^256 - n_256k1), i.e. iff n_256k1 <= x.
// Create a mask for the condition x < n, and mask the three nontrivial digits
// ready to undo the previous addition with a compensating subtraction
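// A hedged C model of the whole routine (illustrative name); the assembly
// below uses a mask rather than a branch so the code stays constant-time:
//
//   #include <stdint.h>
//   static void mod_n256k1_4_sketch(uint64_t z[4], const uint64_t x[4])
//   {
//       const uint64_t k[4] =                   // 2^256 - n_256k1
//           { 0x402da1732fc9bebfu, 0x4551231950b75fc4u, 1, 0 };
//       uint64_t d[4];
//       unsigned __int128 t = 0;
//       for (int i = 0; i < 4; i++) {           // d = x + (2^256 - n_256k1)
//           t += (unsigned __int128)x[i] + k[i];
//           d[i] = (uint64_t)t;
//           t >>= 64;
//       }
//       for (int i = 0; i < 4; i++)             // carry <=> n_256k1 <= x
//           z[i] = t ? d[i] : x[i];
//   }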
sbbq c, c
notq c
andq c, n0
andq c, n1
andq c, n2
// Now subtract mask * (2^256 - n_256k1) again and store
subq n0, d0
movq d0, (z)
sbbq n1, d1
movq d1, 8(z)
sbbq n2, d2
movq d2, 16(z)
sbbq $0, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_n256k1_4)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_tomont_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^256 * x) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_tomont_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
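// Hedged usage example: together with bignum_demont_p256k1 (elsewhere in
// this library) this gives a round trip for reduced inputs:
//
//   uint64_t x[4] = {1, 2, 3, 4}, m[4], y[4];
//   bignum_tomont_p256k1(m, x);    // m = (2^256 * x) mod p_256k1
//   bignum_demont_p256k1(y, m);    // y = x again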
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p256k1)
.text
#define z %rdi
#define x %rsi
#define d %rdx
#define a %rax
#define ashort %eax
#define q %rax
#define d0 %rcx
#define d1 %r8
#define d2 %r9
#define d3 %r10
// Re-use the x argument later on when it's no longer needed
#define h %rsi
#define c %rsi
S2N_BN_SYMBOL(bignum_tomont_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Since 2^256 == 4294968273 (mod p_256k1), we simply set the multiplier
// m = 4294968273 and devolve to a variant of bignum_cmul_p256k1;
// the logic that q = h + 1 < 2^64 and hence doesn't wrap still holds
// since the multiplier 4294968273 is known to be much less than 2^64.
// We keep this constant in %rdx throughout as it's used repeatedly.
movq $4294968273, d
// Multiply, accumulating the result as 2^256 * h + [d3;d2;d1;d0]
// But immediately add 1 to h to get q = h + 1 as the quotient estimate.
mulxq (x), d0, d1
mulxq 8(x), a, d2
addq a, d1
mulxq 16(x), a, d3
adcq a, d2
mulxq 24(x), a, h
adcq a, d3
adcq $1, h
// Now the quotient estimate is q = h + 1, and then we do the reduction,
// writing z = [d3;d2;d1;d0], as z' = (2^256 * h + z) - q * p_256k1 =
// (2^256 * h + z) - q * (2^256 - 4294968273) = -2^256 + (z + 4294968273 * q)
mulxq h, a, c
addq a, d0
adcq c, d1
adcq $0, d2
adcq $0, d3
// Because of the implicit -2^256, CF means >= 0 so z' is the answer; ~CF
// means z' < 0 so we add p_256k1, which in 4 digits means subtracting c.
movq $0, a
cmovcq a, d
subq d, d0
movq d0, (z)
sbbq a, d1
movq d1, 8(z)
sbbq a, d2
movq d2, 16(z)
sbbq a, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_cmul_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_256k1, z := (c * x) mod p_256k1, assuming
// x reduced
// Inputs c, x[4]; output z[4]
//
// extern void bignum_cmul_p256k1_alt(uint64_t z[static 4], uint64_t c,
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p256k1_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p256k1_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p256k1_alt)
.text
#define z %rdi
// Temporarily moved here for initial multiply
#define x %rcx
#define c %rsi
// Likewise this is thrown away after initial multiply
#define d %rdx
#define h %rdx
#define a %rax
#define ashort %eax
#define q %rax
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %rcx
S2N_BN_SYMBOL(bignum_cmul_p256k1_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Shuffle inputs (since we want %rdx for the high parts of products)
movq %rdx, x
// Multiply, accumulating the result as 2^256 * h + [d3;d2;d1;d0]
movq (x), a
mulq c
movq a, d0
movq d, d1
movq 8(x), a
xorq d2, d2
mulq c
addq a, d1
adcq d, d2
movq 16(x), a
mulq c
addq a, d2
adcq $0, d
movq 24(x), a
movq d, d3
mulq c
addq a, d3
adcq $0, h
// Now the quotient estimate is q = h + 1, and then we do the reduction,
// writing z = [d3;d2;d1;d0], as z' = (2^256 * h + z) - q * p_256k1 =
// (2^256 * h + z) - q * (2^256 - 4294968273) = -2^256 + (z + 4294968273 * q)
leaq 1(h), q
movq $4294968273, c
mulq c
addq %rax, d0
adcq %rdx, d1
adcq $0, d2
adcq $0, d3
// Because of the implicit -2^256, CF means >= 0 so z' is the answer; ~CF
// means z' < 0 so we add p_256k1, which in 4 digits means subtracting c.
movq $0, a
cmovcq a, c
subq c, d0
movq d0, (z)
sbbq a, d1
movq d1, 8(z)
sbbq a, d2
movq d2, 16(z)
sbbq a, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p256k1_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_montmul_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^256) mod p_256k1
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_montmul_p256k1_alt(uint64_t z[static 4],
// const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Does z := (2^{-256} * x * y) mod p_256k1, assuming that the inputs x and y
// satisfy x * y <= 2^256 * p_256k1 (in particular this is true if we are in
// the "usual" case x < p_256k1 and y < p_256k1).
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p256k1_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p256k1_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p256k1_alt)
.text
// These are already the correct ABI argument registers, so no copy is needed
#define z %rdi
#define x %rsi
// Copied in or set up
#define y %rcx
// Re-used for constants in second part
#define w %rsi
// Macro for the key "multiply and add to (c,h,l)" step
#define combadd(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A minutely shorter form for when c = 0 initially
#define combadz(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq c, c
// A short form where we don't expect a top carry
#define combads(h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h
S2N_BN_SYMBOL(bignum_montmul_p256k1_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Copy y into a safe register to start with
movq %rdx, y
// Start the window as [%r10;%r9;%r8] with 00 product
movq (x), %rax
mulq (y)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
// Column 1
xorq %r11, %r11
combads(%r10,%r9,(x),8(y))
combadz(%r11,%r10,%r9,8(x),(y))
// Column 2
xorq %r12, %r12
combadz(%r12,%r11,%r10,(x),16(y))
combadd(%r12,%r11,%r10,8(x),8(y))
combadd(%r12,%r11,%r10,16(x),(y))
// Column 3
xorq %r13, %r13
combadz(%r13,%r12,%r11,(x),24(y))
combadd(%r13,%r12,%r11,8(x),16(y))
combadd(%r13,%r12,%r11,16(x),8(y))
combadd(%r13,%r12,%r11,24(x),(y))
// Column 4
xorq %r14, %r14
combadz(%r14,%r13,%r12,8(x),24(y))
combadd(%r14,%r13,%r12,16(x),16(y))
combadd(%r14,%r13,%r12,24(x),8(y))
// Column 5
xorq %r15, %r15
combadz(%r15,%r14,%r13,16(x),24(y))
combadd(%r15,%r14,%r13,24(x),16(y))
// Final work for columns 6 and 7
movq 24(x), %rax
mulq 24(y)
addq %rax, %r14
adcq %rdx, %r15
// Now we have the full 8-digit product 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// Do Montgomery reductions, now using %rcx as a carry-saver.
movq $0xd838091dd2253531, w
movq $4294968273, %rbx
// Montgomery reduce row 0
movq %rbx, %rax
imulq w, %r8
mulq %r8
subq %rdx, %r9
sbbq %rcx, %rcx
// Montgomery reduce row 1
movq %rbx, %rax
imulq w, %r9
mulq %r9
negq %rcx
sbbq %rdx, %r10
sbbq %rcx, %rcx
// Montgomery reduce row 2
movq %rbx, %rax
imulq w, %r10
mulq %r10
negq %rcx
sbbq %rdx, %r11
sbbq %rcx, %rcx
// Montgomery reduce row 3
movq %rbx, %rax
imulq w, %r11
mulq %r11
negq %rcx
// Now [%r15,%r14,%r13,%r12] := [%r15,%r14,%r13,%r12] + [%r11,%r10,%r9,%r8] - (%rdx + CF)
sbbq %rdx, %r8
sbbq $0, %r9
sbbq $0, %r10
sbbq $0, %r11
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
sbbq w, w
// Let b be the top carry captured just above as w = (2^64-1) * b
// Now if [b,%r15,%r14,%r13,%r12] >= p_256k1, subtract p_256k1, i.e. add 4294968273
// and either way throw away the top word. [b,%r15,%r14,%r13,%r12] - p_256k1 =
// [(b - 1),%r15,%r14,%r13,%r12] + 4294968273. If [%r15,%r14,%r13,%r12] + 4294968273
// gives carry flag CF then >= comparison is top = 0 <=> b - 1 + CF = 0 which
// is equivalent to b \/ CF, and so to (2^64-1) * b + (2^64 - 1) + CF >= 2^64
movq %r12, %r8
addq %rbx, %r8
movq %r13, %r9
adcq $0, %r9
movq %r14, %r10
adcq $0, %r10
movq %r15, %r11
adcq $0, %r11
adcq $-1, w
// Write everything back
cmovcq %r8, %r12
movq %r12, (z)
cmovcq %r9, %r13
movq %r13, 8(z)
cmovcq %r10, %r14
movq %r14, 16(z)
cmovcq %r11, %r15
movq %r15, 24(z)
// Restore registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p256k1_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_mul_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply modulo p_256k1, z := (x * y) mod p_256k1
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_mul_p256k1(uint64_t z[static 4], const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p256k1)
.text
// These are already the correct ABI argument registers, so no copy is needed
#define z %rdi
#define x %rsi
// Copied in or set up
#define y %rcx
// A zero register
#define zero %rbp
#define zeroe %ebp
// mulpadd(high,low,m) adds %rdx * m to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rbx as temporaries.
#define mulpadd(high,low,m) \
mulxq m, %rax, %rbx ; \
adcxq %rax, low ; \
adoxq %rbx, high
// mulpade(high,low,m) adds %rdx * m to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax as a temporary, assuming high created from scratch
// and that zero has value zero.
#define mulpade(high,low,m) \
mulxq m, %rax, high ; \
adcxq %rax, low ; \
adoxq zero, high
S2N_BN_SYMBOL(bignum_mul_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Copy y into a safe register to start with
movq %rdx, y
// Zero a register, which also makes sure we don't get a fake carry-in
xorl zeroe, zeroe
// Do the zeroth row, which is a bit different
movq (y), %rdx
mulxq (x), %r8, %r9
mulxq 8(x), %rax, %r10
addq %rax, %r9
mulxq 16(x), %rax, %r11
adcq %rax, %r10
mulxq 24(x), %rax, %r12
adcq %rax, %r11
adcq zero, %r12
// Add row 1
xorl zeroe, zeroe
movq 8(y), %rdx
mulpadd(%r10,%r9,(x))
mulpadd(%r11,%r10,8(x))
mulpadd(%r12,%r11,16(x))
mulpade(%r13,%r12,24(x))
adcxq zero, %r13
// Add row 2
xorl zeroe, zeroe
movq 16(y), %rdx
mulpadd(%r11,%r10,(x))
mulpadd(%r12,%r11,8(x))
mulpadd(%r13,%r12,16(x))
        mulpade(%r14,%r13,24(x))
adcxq zero, %r14
// Add row 3
xorl zeroe, zeroe
movq 24(y), %rdx
mulpadd(%r12,%r11,(x))
mulpadd(%r13,%r12,8(x))
        mulpadd(%r14,%r13,16(x))
        mulpade(%r15,%r14,24(x))
adcxq zero, %r15
// Now we have the full 8-digit product 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// and this is == 4294968273 * h + l (mod p_256k1)
movq $4294968273, %rdx
xorl zeroe, zeroe
mulpadd(%r9,%r8,%r12)
mulpadd(%r10,%r9,%r13)
mulpadd(%r11,%r10,%r14)
mulpade(%r12,%r11,%r15)
adcxq zero, %r12
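// A hedged C model of the fold just performed (since 2^256 == 4294968273
// mod p_256k1, the high half h is multiplied down into the low half l;
// the name is illustrative):
//
//   #include <stdint.h>
//   static uint64_t fold8to5_sketch(uint64_t l[4], const uint64_t h[4])
//   {
//       const uint64_t c = 0x1000003d1u;        // 2^256 mod p_256k1
//       unsigned __int128 t = 0;
//       for (int i = 0; i < 4; i++) {
//           t += (unsigned __int128)c * h[i] + l[i];
//           l[i] = (uint64_t)t;
//           t >>= 64;
//       }
//       return (uint64_t)t;                     // new top word, ~33 bits
//   }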
// Now we have reduced to 5 digits, 2^256 * h + l = [%r12,%r11,%r10,%r9,%r8]
// Use q = h + 1 as the initial quotient estimate, either right or 1 too big.
leaq 1(%r12), %rax
mulxq %rax, %rax, %rbx
addq %rax, %r8
adcq %rbx, %r9
adcq zero, %r10
adcq zero, %r11
// Now the effective answer is 2^256 * (CF - 1) + [%r11,%r10,%r9,%r8]
// So we correct if CF = 0 by subtracting 4294968273, i.e. by
// adding p_256k1 to the "full" answer
cmovcq zero, %rdx
subq %rdx, %r8
sbbq zero, %r9
sbbq zero, %r10
sbbq zero, %r11
// Write everything back
movq %r8, (z)
movq %r9, 8(z)
movq %r10, 16(z)
movq %r11, 24(z)
// Restore registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mul_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_triple_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Triple modulo p_256k1, z := (3 * x) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_triple_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// The input x can be any 4-digit bignum, not necessarily reduced modulo
// p_256k1, and the result is always fully reduced, z = (3 * x) mod p_256k1.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p256k1)
.text
#define z %rdi
#define x %rsi
// Main digits of intermediate results
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
// Quotient estimate = top of product + 1
#define q %rdx
// Other temporary variables and their short version
#define a %rax
#define c %rcx
#define ashort %eax
#define qshort %edx
S2N_BN_SYMBOL(bignum_triple_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// First do the multiplication by 3, getting z = [h; d3; ...; d0]
// but immediately form the quotient estimate q = h + 1
xorl ashort, ashort
movq (x), q
movq q, d0
adcxq q, q
adoxq q, d0
movq 8(x), q
movq q, d1
adcxq q, q
adoxq q, d1
movq 16(x), q
movq q, d2
adcxq q, q
adoxq q, d2
movq 24(x), q
movq q, d3
adcxq q, q
adoxq q, d3
// For this limited range a simple quotient estimate of q = h + 1 works, where
// h = floor(z / 2^256). Then -p_256k1 <= z - q * p_256k1 < p_256k1.
movl $1, qshort
adcxq a, q
adoxq a, q
// Initial subtraction of z - q * p_256k1, actually by adding q * 4294968273.
movq $4294968273, c
xorq a, a
imulq c, q
addq q, d0
adcq a, d1
adcq a, d2
adcq a, d3
// With z = 2^256 * h + l, the underlying result z' is actually
// (2^256 * h + l) - q * (2^256 - 4294968273) = (l + q * 4294968273) - 2^256
// so carry-clear <=> z' is negative. Correct by subtracting in that case.
// In any case, write final result to z as we go.
cmovcq a, c
subq c, d0
movq d0, (z)
sbbq a, d1
movq d1, 8(z)
sbbq a, d2
movq d2, 16(z)
sbbq a, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_triple_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_add_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Add modulo p_256k1, z := (x + y) mod p_256k1, assuming x and y reduced
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_add_p256k1(uint64_t z[static 4], const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_p256k1)
.text
#define z %rdi
#define x %rsi
#define y %rdx
#define d0 %rcx
#define d1 %r8
#define d2 %r9
#define d3 %r10
#define dd %rax
// These two re-use inputs x and y when safe to do so
#define l %rsi
#define c %rdx
S2N_BN_SYMBOL(bignum_add_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Load and add the two inputs as 2^256 * (-c) + [d3;d2;d1;d0] = x + y
movq (x), d0
addq (y), d0
movq 8(x), d1
adcq 8(y), d1
movq 16(x), d2
adcq 16(y), d2
movq 24(x), d3
adcq 24(y), d3
sbbq c, c
// Create dd = d3 AND d2 AND d1 to condense the later comparison
// We hope this will interleave with the addition, though we can't
// express that directly as the AND operation destroys the carry flag.
movq d1, dd
andq d2, dd
andq d3, dd
// Decide whether z >= p_256k1 <=> z + 4294968273 >= 2^256.
// For the lowest word use d0 + 4294968273 >= 2^64 <=> ~4294968273 < d0
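// A hedged C model of this branch-free comparison (illustrative name;
// carry256 is the carry already captured from the x + y addition):
//
//   #include <stdint.h>
//   static int ge_p256k1_sketch(const uint64_t d[4], uint64_t carry256)
//   {
//       // z >= p_256k1 <=> z + 4294968273 >= 2^256, or x + y already
//       // overflowed 2^256. The low-word carry is d[0] + 4294968273 >= 2^64,
//       // and it reaches the top iff d[1], d[2] and d[3] are all ones,
//       // hence the AND-condensation above.
//       uint64_t lowcarry = d[0] > ~(uint64_t)0x1000003d1u;
//       uint64_t allones = (d[1] & d[2] & d[3]) == ~(uint64_t)0;
//       return (int)(carry256 | (lowcarry & allones));
//   }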
movq $~4294968273, l
cmpq d0, l
adcq $0, dd
sbbq $0, c
// Now c <> 0 <=> z >= p_256k1, so mask the constant l accordingly
notq l
cmovzq c, l
// If z >= p_256k1 do z := z - p_256k1, i.e. add l in 4 digits
addq l, d0
movq d0, (z)
adcq $0, d1
movq d1, 8(z)
adcq $0, d2
movq d2, 16(z)
adcq $0, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_add_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_demont_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^256) mod p_256k1,
// assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_demont_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// This assumes the input is < p_256k1 for correctness. If this is not the
// case, use the variant "bignum_deamont_p256k1" instead.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p256k1)
.text
#define z %rdi
#define x %rsi
// Re-use x variable for the negated multiplicative inverse of p_256k1
#define w %rsi
// The rotating registers for the 4 digits
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
// Other variables. We need d == %rdx for mulx instructions
#define a %rax
#define d %rdx
#define c %rcx
S2N_BN_SYMBOL(bignum_demont_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Set up an initial 4-word window [d3,d2,d1,d0] = x
movq (x), d0
movq 8(x), d1
movq 16(x), d2
movq 24(x), d3
// Set w to negated multiplicative inverse p_256k1 * w == -1 (mod 2^64).
movq $0xd838091dd2253531, w
// Four stages of Montgomery reduction, rotating the register window.
// Use c as a carry-catcher since the imul destroys the flags in general.
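// (Sketch of one stage: the imulq turns d0 into m = d0 * w mod 2^64,
// chosen so that d0 + m * p_256k1 == 0 (mod 2^64). Since
// m * p_256k1 = m * 2^256 - m * 4294968273, the low 64 bits of the
// mulq product equal the old d0 exactly, so that digit cancels and
// only the high part d is subtracted from the next digit, while m,
// left in d0, rotates in as a new top digit.)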
imulq w, d0
movq $4294968273, a
mulq d0
subq d, d1
sbbq c, c
imulq w, d1
movq $4294968273, a
mulq d1
negq c
sbbq d, d2
sbbq c, c
imulq w, d2
movq $4294968273, a
mulq d2
negq c
sbbq d, d3
sbbq c, c
imulq w, d3
movq $4294968273, a
mulq d3
negq c
sbbq d, d0
// Finish propagating carry through new top part, write back and return
movq d0, (z)
sbbq $0, d1
movq d1, 8(z)
sbbq $0, d2
movq d2, 16(z)
sbbq $0, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_demont_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_sqr_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Square modulo p_256k1, z := (x^2) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_sqr_p256k1(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p256k1)
.text
#define z %rdi
#define x %rsi
// Use this fairly consistently for a zero
#define zero %rbx
#define zeroe %ebx
// Add %rdx * m into a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rcx as temporaries
#define mulpadd(high,low,m) \
mulxq m, %rax, %rcx ; \
adcxq %rax, low ; \
adoxq %rcx, high
// mulpade(high,low,m) adds %rdx * m to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax as a temporary, assuming high created from scratch
// and that zero has value zero.
#define mulpade(high,low,m) \
mulxq m, %rax, high ; \
adcxq %rax, low ; \
adoxq zero, high
S2N_BN_SYMBOL(bignum_sqr_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Compute [%r15;%r8] = [00] which we use later, but mainly
// set up an initial window [%r14;...;%r9] = [23;03;01]
movq (x), %rdx
mulxq %rdx, %r8, %r15
mulxq 8(x), %r9, %r10
mulxq 24(x), %r11, %r12
movq 16(x), %rdx
mulxq 24(x), %r13, %r14
// Clear our zero register, and also initialize the flags for the carry chain
xorl zeroe, zeroe
// Chain in the addition of 02 + 12 + 13 to that window (no carry-out possible)
// This gives all the "heterogeneous" terms of the squaring ready to double
mulpadd(%r11,%r10,(x))
mulpadd(%r12,%r11,8(x))
movq 24(x), %rdx
mulpadd(%r13,%r12,8(x))
adcxq zero, %r13
adoxq zero, %r14
adcq zero, %r14
// Double and add to the 00 + 11 + 22 + 33 terms
xorl zeroe, zeroe
adcxq %r9, %r9
adoxq %r15, %r9
movq 8(x), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 16(x), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 24(x), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq zero, %r15
adoxq zero, %r15
// Now we have the full 8-digit product 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// and this is == 4294968273 * h + l (mod p_256k1)
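// (using 2^256 == 4294968273 (mod p_256k1), as p_256k1 = 2^256 - 4294968273)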
movq $4294968273, %rdx
xorl zeroe, zeroe
mulpadd(%r9,%r8,%r12)
mulpadd(%r10,%r9,%r13)
mulpadd(%r11,%r10,%r14)
mulpade(%r12,%r11,%r15)
adcxq zero, %r12
// Now we have reduced to 5 digits, 2^256 * h + l = [%r12,%r11,%r10,%r9,%r8]
// Use q = h + 1 as the initial quotient estimate, either right or 1 too big.
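// (The digit h = %r12 is only about 33 bits after the folding above,
// so z < (h + 2) * p_256k1 and the true quotient is h or h + 1.)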
leaq 1(%r12), %rax
mulxq %rax, %rax, %rcx
addq %rax, %r8
adcq %rcx, %r9
adcq zero, %r10
adcq zero, %r11
// Now the effective answer is 2^256 * (CF - 1) + [%r11,%r10,%r9,%r8]
// So we correct if CF = 0 by subtracting 4294968273, i.e. by
// adding p_256k1 to the "full" answer
sbbq %rax, %rax
notq %rax
andq %rdx, %rax
subq %rax, %r8
sbbq zero, %r9
sbbq zero, %r10
sbbq zero, %r11
// Write everything back
movq %r8, (z)
movq %r9, 8(z)
movq %r10, 16(z)
movq %r11, 24(z)
// Restore saved registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_sqr_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_mod_p256k1_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_mod_p256k1_4(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p256k1_4)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p256k1_4)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p256k1_4)
.text
#define z %rdi
#define x %rsi
#define d0 %rdx
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define c %r10
#define d %rax
S2N_BN_SYMBOL(bignum_mod_p256k1_4):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the inputs as [d3;d2;d1;d0] and let d be an AND of [d3;d2;d1] to
// condense the comparison below.
movq (x), d0
movq 8(x), d1
movq d1, d
movq 16(x), d2
andq d2, d
movq 24(x), d3
andq d3, d
// Compare x >= p_256k1 = 2^256 - 4294968273 using condensed carry:
// we get a carry from the lowest digit and all other digits are 1.
// We end up with c and d as adjusted digits for x - p_256k1 if so.
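// (That is, x >= p_256k1 <=> x + 4294968273 >= 2^256, which needs the
// low-digit addition to carry and d1 = d2 = d3 = all 1s, i.e. d all 1s.)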
movq $4294968273, c
addq d0, c
adcq $0, d
// If indeed x >= p_256k1 then x := x - p_256k1, using c and d
// Either way, write back to z
cmovcq c, d0
movq d0, (z)
cmovcq d, d1
movq d1, 8(z)
cmovcq d, d2
movq d2, 16(z)
cmovcq d, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_p256k1_4)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_neg_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Negate modulo p_256k1, z := (-x) mod p_256k1, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_neg_p256k1(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p256k1)
.text
#define z %rdi
#define x %rsi
#define q %rdx
#define n0 %rax
#define n1 %rcx
#define n2 %r8
#define n3 %r9
#define c %r10
#define qshort %esi
S2N_BN_SYMBOL(bignum_neg_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the 4 digits of x and let q be an OR of all the digits
movq (x), n0
movq n0, q
movq 8(x), n1
orq n1, q
movq 16(x), n2
orq n2, q
movq 24(x), n3
orq n3, q
// Turn q into a strict bitmask, and c a masked constant -4294968273
negq q
sbbq q, q
movq $-4294968273, c
andq q, c
// Now just do [2^256 - 4294968273] - x where the constant is masked
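// (If x = 0 then q = 0 masks everything and the result is 0, not
// p_256k1; otherwise [q;q;q;c] = 2^256 - 4294968273 = p_256k1 and the
// sbbq chain computes p_256k1 - x.)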
subq n0, c
movq c, (z)
movq q, c
sbbq n1, c
movq c, 8(z)
movq q, c
sbbq n2, c
movq c, 16(z)
sbbq n3, q
movq q, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_neg_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/secp256k1_jadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on SECG curve secp256k1 in Jacobian coordinates
//
// extern void secp256k1_jadd(uint64_t p3[static 12], const uint64_t p1[static 12],
// const uint64_t p2[static 12]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input points p1 and p2 are
// fully reduced mod p_256k1, that both z coordinates are nonzero and
// that neither p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents
// the same affine point as".
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jadd)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// all of which are maintained throughout the code.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define z_2 (2*NUMSIZE)(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define x1a (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define z2sq (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define y1a (NUMSIZE*6)(%rsp)
#define NSPACE NUMSIZE*7
// Corresponds exactly to bignum_mul_p256k1
#define mul_p256k1(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rax, %r10 ; \
addq %rax, %r9 ; \
mulxq 0x10+P1, %rax, %r11 ; \
adcq %rax, %r10 ; \
mulxq 0x18+P1, %rax, %r12 ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x8+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rcx, %r13 ; \
xorl %ecx, %ecx ; \
movq 0x10+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq 0x18+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcxq %rcx, %r15 ; \
movabs $0x1000003d1, %rdx ; \
xorl %ecx, %ecx ; \
mulxq %r12, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq %r13, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq %r14, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
adcxq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulxq %rax, %rax, %rbx ; \
addq %rax, %r8 ; \
adcq %rbx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
cmovbq %rcx, %rdx ; \
subq %rdx, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds exactly to bignum_sqr_p256k1
#define sqr_p256k1(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ebx, %ebx ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rbx, %r13 ; \
adoxq %rbx, %r14 ; \
adcq %rbx, %r14 ; \
xorl %ebx, %ebx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rbx, %r15 ; \
adoxq %rbx, %r15 ; \
movabs $0x1000003d1, %rdx ; \
xorl %ebx, %ebx ; \
mulxq %r12, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq %r13, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq %r14, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
adcxq %rbx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulxq %rax, %rax, %rcx ; \
addq %rax, %r8 ; \
adcq %rcx, %r9 ; \
adcq %rbx, %r10 ; \
adcq %rbx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rdx, %rax ; \
subq %rax, %r8 ; \
sbbq %rbx, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq %rbx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds exactly to bignum_sub_p256k1
#define sub_p256k1(P0,P1,P2) \
xorl %eax, %eax ; \
movq P1, %r8 ; \
subq P2, %r8 ; \
movq 0x8+P1, %r9 ; \
sbbq 0x8+P2, %r9 ; \
movq 0x10+P1, %r10 ; \
sbbq 0x10+P2, %r10 ; \
movq 0x18+P1, %r11 ; \
sbbq 0x18+P2, %r11 ; \
movabs $0x1000003d1, %rcx ; \
cmovae %rax, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rax, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq %rax, %r11 ; \
movq %r11, 0x18+P0
// Additional macros to help with final multiplexing
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
#define czload4(r0,r1,r2,r3,P) \
cmovzq P, r0 ; \
cmovzq 8+P, r1 ; \
cmovzq 16+P, r2 ; \
cmovzq 24+P, r3
#define muxload4(r0,r1,r2,r3,P0,P1,P2) \
movq P0, r0 ; \
cmovbq P1, r0 ; \
cmovnbe P2, r0 ; \
movq 8+P0, r1 ; \
cmovbq 8+P1, r1 ; \
cmovnbe 8+P2, r1 ; \
movq 16+P0, r2 ; \
cmovbq 16+P1, r2 ; \
cmovnbe 16+P2, r2 ; \
movq 24+P0, r3 ; \
cmovbq 24+P1, r3 ; \
cmovnbe 24+P2, r3
S2N_BN_SYMBOL(secp256k1_jadd):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input y in %rbp where it stays
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
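// In effect the temporaries compute the standard Jacobian formulas:
// x1a = x_1 * z_2^2, x2a = x_2 * z_1^2, y1a = y_1 * z_2^3,
// y2a = y_2 * z_1^3, xd = x2a - x1a, yd = y2a - y1a, then
// resx = yd^2 - (x1a + x2a) * xd^2,
// resy = yd * (x1a * xd^2 - resx) - y1a * xd^3,
// resz = xd * z_1 * z_2.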
sqr_p256k1(z1sq,z_1)
sqr_p256k1(z2sq,z_2)
mul_p256k1(y1a,z_2,y_1)
mul_p256k1(y2a,z_1,y_2)
mul_p256k1(x2a,z1sq,x_2)
mul_p256k1(x1a,z2sq,x_1)
mul_p256k1(y2a,z1sq,y2a)
mul_p256k1(y1a,z2sq,y1a)
sub_p256k1(xd,x2a,x1a)
sub_p256k1(yd,y2a,y1a)
sqr_p256k1(zz,xd)
sqr_p256k1(ww,yd)
mul_p256k1(zzx1,zz,x1a)
mul_p256k1(zzx2,zz,x2a)
sub_p256k1(resx,ww,zzx1)
sub_p256k1(t1,zzx2,zzx1)
mul_p256k1(xd,xd,z_1)
sub_p256k1(resx,resx,zzx2)
sub_p256k1(t2,zzx1,resx)
mul_p256k1(t1,t1,y1a)
mul_p256k1(resz,xd,z_2)
mul_p256k1(t2,yd,t2)
sub_p256k1(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0)
// and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0
// and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0)
load4(%r8,%r9,%r10,%r11,z_1)
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
load4(%r12,%r13,%r14,%r15,z_2)
movq %r12, %rbx
movq %r13, %rdx
orq %r14, %rbx
orq %r15, %rdx
orq %rdx, %rbx
negq %rbx
sbbq %rbx, %rbx
cmpq %rax, %rbx
// Multiplex the outputs accordingly, re-using the z's in registers
cmovbq %r8, %r12
cmovbq %r9, %r13
cmovbq %r10, %r14
cmovbq %r11, %r15
czload4(%r12,%r13,%r14,%r15,resz)
muxload4(%rax,%rbx,%rcx,%rdx,resx,x_1,x_2)
muxload4(%r8,%r9,%r10,%r11,resy,y_1,y_2)
// Finally store back the multiplexed values
store4(x_3,%rax,%rbx,%rcx,%rdx)
store4(y_3,%r8,%r9,%r10,%r11)
store4(z_3,%r12,%r13,%r14,%r15)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(secp256k1_jadd)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_tomont_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^256 * x) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_tomont_p256k1_alt(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p256k1_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p256k1_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p256k1_alt)
.text
#define z %rdi
#define x %rsi
#define c %rcx
#define d %rdx
#define h %rdx
#define a %rax
#define ashort %eax
#define q %rax
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %rsi
S2N_BN_SYMBOL(bignum_tomont_p256k1_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Since 2^256 == 4294968273 (mod p_256k1) we more or less just set
// m = 4294968273 then devolve to a variant of bignum_cmul_p256k1;
// the logic that q = h + 1 < 2^64 and hence doesn't wrap still holds
// since the multiplier 4294968273 is known to be much less than 2^64.
// We keep this constant in %rcx throughout as it's used repeatedly.
movq $4294968273, c
// Multiply, accumulating the result as 2^256 * h + [d3;d2;d1;d0]
movq (x), a
mulq c
movq a, d0
movq d, d1
movq 8(x), a
xorq d2, d2
mulq c
addq a, d1
adcq d, d2
movq 16(x), a
mulq c
addq a, d2
adcq $0, d
movq 24(x), a
movq d, d3
mulq c
addq a, d3
adcq $0, h
// Now the quotient estimate is q = h + 1, and then we do the reduction,
// writing z = [d3;d2;d1;d0], as z' = (2^256 * h + z) - q * p_256k1 =
// (2^256 * h + z) - q * (2^256 - 4294968273) = -2^256 + (z + 4294968273 * q)
leaq 1(h), q
mulq c
addq %rax, d0
adcq %rdx, d1
adcq $0, d2
adcq $0, d3
// Because of the implicit -2^256, CF means >= 0 so z' is the answer; ~CF
// means z' < 0 so we add p_256k1, which in 4 digits means subtracting c.
movq $0, a
cmovcq a, c
subq c, d0
movq d0, (z)
sbbq a, d1
movq d1, 8(z)
sbbq a, d2
movq d2, 16(z)
sbbq a, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p256k1_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_optneg_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Optionally negate modulo p_256k1, z := (-x) mod p_256k1 (if p nonzero) or
// z := x (if p zero), assuming x reduced
// Inputs p, x[4]; output z[4]
//
// extern void bignum_optneg_p256k1(uint64_t z[static 4], uint64_t p,
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = p, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = p, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_p256k1)
.text
#define z %rdi
#define q %rsi
#define x %rdx
#define n0 %rax
#define n1 %rcx
#define n2 %r8
#define n3 %r9
#define c %r10
#define qshort %esi
S2N_BN_SYMBOL(bignum_optneg_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Load the 4 digits of x and let c be an OR of all the digits
movq (x), n0
movq n0, c
movq 8(x), n1
orq n1, c
movq 16(x), n2
orq n2, c
movq 24(x), n3
orq n3, c
// Turn q into a strict bitmask. Force it to zero if the input is zero,
// to avoid giving -0 = p_256k1, which is not reduced though correct modulo.
cmovzq c, q
negq q
sbbq q, q
// We want z := if q then (2^256 - 4294968273) - x else x
// which is: [if q then ~x else x] - [if q then 4294968272 else 0]
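// (Check: ~x - 4294968272 = (2^256 - 1 - x) - 4294968272
//                         = (2^256 - 4294968273) - x = p_256k1 - x.)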
xorq q, n0
xorq q, n1
xorq q, n2
xorq q, n3
movq $4294968272, c
andq q, c
xorl qshort, qshort
subq c, n0
movq n0, (z)
sbbq q, n1
movq n1, 8(z)
sbbq q, n2
movq n2, 16(z)
sbbq q, n3
movq n3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_optneg_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_double_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Double modulo p_256k1, z := (2 * x) mod p_256k1, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_double_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_double_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_p256k1)
.text
#define z %rdi
#define x %rsi
#define d0 %rcx
#define d1 %r8
#define d2 %r9
#define d3 %r10
#define dd %rax
#define c %rdx
// Re-uses the input x when safe to do so
#define l %rsi
S2N_BN_SYMBOL(bignum_double_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the inputs and double top-down as z = 2^256 * c + [d3;d2;d1;d0]
// While doing this, create an AND dd of [d3;d2;d1] to condense comparison
movq 24(x), d3
movq d3, c
movq 16(x), d2
shrq $63, c
shldq $1, d2, d3
movq d3, dd
movq 8(x), d1
shldq $1, d1, d2
andq d2, dd
movq (x), d0
shldq $1, d0, d1
andq d1, dd
shlq $1, d0
// Decide whether z >= p_256k1 <=> z + 4294968273 >= 2^256.
// For the lowest word use d0 + 4294968273 >= 2^64 <=> ~4294968273 < d0
movq $~4294968273, l
cmpq d0, l
adcq $0, dd
adcq $0, c
// Now c <> 0 <=> z >= p_256k1, so mask the constant l accordingly
notq l
cmovzq c, l
// If z >= p_256k1 do z := z - p_256k1, i.e. add l in 4 digits
addq l, d0
movq d0, (z)
adcq $0, d1
movq d1, 8(z)
adcq $0, d2
movq d2, 16(z)
adcq $0, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_double_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_triple_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Triple modulo p_256k1, z := (3 * x) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_triple_p256k1_alt(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// The input x can be any 4-digit bignum, not necessarily reduced modulo
// p_256k1, and the result is always fully reduced, z = (3 * x) mod p_256k1.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p256k1_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p256k1_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p256k1_alt)
.text
#define z %rdi
#define x %rsi
// Main digits of intermediate results
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
// Quotient estimate = top of product + 1
#define d %rdx
#define h %rdx
#define q %rdx
// Other temporary variables and their short versions
#define a %rax
#define c %rcx
#define ashort %eax
#define qshort %edx
S2N_BN_SYMBOL(bignum_triple_p256k1_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// First do the multiplication by 3, getting z = [h; d3; ...; d0]
// but immediately form the quotient estimate q = h + 1
movq $3, c
movq (x), a
mulq c
movq a, d0
movq d, d1
movq 8(x), a
xorq d2, d2
mulq c
addq a, d1
adcq d, d2
movq 16(x), a
mulq c
addq a, d2
adcq $0, d
movq 24(x), a
movq d, d3
mulq c
addq a, d3
adcq $1, h
// For this limited range a simple quotient estimate of q = h + 1 works, where
// h = floor(z / 2^256). Then -p_256k1 <= z - q * p_256k1 < p_256k1.
// Initial subtraction of z - q * p_256k1, actually by adding q * 4294968273.
movq $4294968273, c
xorq a, a
imulq c, q
addq q, d0
adcq a, d1
adcq a, d2
adcq a, d3
// With z = 2^256 * h + l, the underlying result z' is actually
// (2^256 * h + l) - q * (2^256 - 4294968273) = (l + q * 4294968273) - 2^256
// so carry-clear <=> z' is negative. Correct by subtracting in that case.
// In any case, write final result to z as we go.
cmovcq a, c
subq c, d0
movq d0, (z)
sbbq a, d1
movq d1, 8(z)
sbbq a, d2
movq d2, 16(z)
sbbq a, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_triple_p256k1_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/secp256k1_jadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on SECG curve secp256k1 in Jacobian coordinates
//
// extern void secp256k1_jadd_alt(uint64_t p3[static 12],
// const uint64_t p1[static 12],
// const uint64_t p2[static 12]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input points p1 and p2 are
// fully reduced mod p_256k1, that both z coordinates are nonzero and
// that neither p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents
// the same affine point as".
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jadd_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jadd_alt)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// all of which are maintained throughout the code.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define z_2 (2*NUMSIZE)(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define x1a (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define z2sq (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define y1a (NUMSIZE*6)(%rsp)
#define NSPACE NUMSIZE*7
// Corresponds to bignum_mul_p256k1_alt except %rsi -> %rbx
#define mul_p256k1(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rbx, %rax ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds to bignum_sqr_p256k1_alt except for %rsi -> %rbx
#define sqr_p256k1(P0,P1) \
movq P1, %rax ; \
mulq %rax; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r11 ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r12 ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r13 ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r14 ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r15 ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq $0x1000003d1, %rbx ; \
movq %r12, %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rcx, %rcx ; \
movq %r13, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rcx, %rcx ; \
movq %r14, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq %r15, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
xorq %rcx, %rcx ; \
addq %rax, %r11 ; \
movq %rdx, %r12 ; \
adcq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulq %rbx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rbx, %rax ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds exactly to bignum_sub_p256k1
#define sub_p256k1(P0,P1,P2) \
xorl %eax, %eax ; \
movq P1, %r8 ; \
subq P2, %r8 ; \
movq 0x8+P1, %r9 ; \
sbbq 0x8+P2, %r9 ; \
movq 0x10+P1, %r10 ; \
sbbq 0x10+P2, %r10 ; \
movq 0x18+P1, %r11 ; \
sbbq 0x18+P2, %r11 ; \
movabs $0x1000003d1, %rcx ; \
cmovae %rax, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq %rax, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq %rax, %r11 ; \
movq %r11, 0x18+P0
// Additional macros to help with final multiplexing
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
#define czload4(r0,r1,r2,r3,P) \
cmovzq P, r0 ; \
cmovzq 8+P, r1 ; \
cmovzq 16+P, r2 ; \
cmovzq 24+P, r3
#define muxload4(r0,r1,r2,r3,P0,P1,P2) \
movq P0, r0 ; \
cmovbq P1, r0 ; \
cmovnbe P2, r0 ; \
movq 8+P0, r1 ; \
cmovbq 8+P1, r1 ; \
cmovnbe 8+P2, r1 ; \
movq 16+P0, r2 ; \
cmovbq 16+P1, r2 ; \
cmovnbe 16+P2, r2 ; \
movq 24+P0, r3 ; \
cmovbq 24+P1, r3 ; \
cmovnbe 24+P2, r3
S2N_BN_SYMBOL(secp256k1_jadd_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input y in %rbp where it stays
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
sqr_p256k1(z1sq,z_1)
sqr_p256k1(z2sq,z_2)
mul_p256k1(y1a,z_2,y_1)
mul_p256k1(y2a,z_1,y_2)
mul_p256k1(x2a,z1sq,x_2)
mul_p256k1(x1a,z2sq,x_1)
mul_p256k1(y2a,z1sq,y2a)
mul_p256k1(y1a,z2sq,y1a)
sub_p256k1(xd,x2a,x1a)
sub_p256k1(yd,y2a,y1a)
sqr_p256k1(zz,xd)
sqr_p256k1(ww,yd)
mul_p256k1(zzx1,zz,x1a)
mul_p256k1(zzx2,zz,x2a)
sub_p256k1(resx,ww,zzx1)
sub_p256k1(t1,zzx2,zzx1)
mul_p256k1(xd,xd,z_1)
sub_p256k1(resx,resx,zzx2)
sub_p256k1(t2,zzx1,resx)
mul_p256k1(t1,t1,y1a)
mul_p256k1(resz,xd,z_2)
mul_p256k1(t2,yd,t2)
sub_p256k1(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0)
// and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0
// and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0)
load4(%r8,%r9,%r10,%r11,z_1)
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
load4(%r12,%r13,%r14,%r15,z_2)
movq %r12, %rbx
movq %r13, %rdx
orq %r14, %rbx
orq %r15, %rdx
orq %rdx, %rbx
negq %rbx
sbbq %rbx, %rbx
cmpq %rax, %rbx
// Multiplex the outputs accordingly, re-using the z's in registers
cmovbq %r8, %r12
cmovbq %r9, %r13
cmovbq %r10, %r14
cmovbq %r11, %r15
czload4(%r12,%r13,%r14,%r15,resz)
muxload4(%rax,%rbx,%rcx,%rdx,resx,x_1,x_2)
muxload4(%r8,%r9,%r10,%r11,resy,y_1,y_2)
// Finally store back the multiplexed values
store4(x_3,%rax,%rbx,%rcx,%rdx)
store4(y_3,%r8,%r9,%r10,%r11)
store4(z_3,%r12,%r13,%r14,%r15)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(secp256k1_jadd_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_deamont_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from almost-Montgomery form z := (x / 2^256) mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_deamont_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Convert a 4-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 4-digit input will work, with no range restriction.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p256k1)
.text
#define z %rdi
#define x %rsi
// Re-use x variable for the negated multiplicative inverse of p_256k1
#define w %rsi
// The rotating registers for the 4 digits
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
// Other variables. We need d == %rdx for mulx instructions
#define a %rax
#define d %rdx
#define c %rcx
S2N_BN_SYMBOL(bignum_deamont_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Set up an initial 4-word window [d3,d2,d1,d0] = x
movq (x), d0
movq 8(x), d1
movq 16(x), d2
movq 24(x), d3
// Set w to negated multiplicative inverse p_256k1 * w == -1 (mod 2^64).
movq $0xd838091dd2253531, w
// Four stages of Montgomery reduction, rotating the register window.
// Use c as a carry-catcher since the imul destroys the flags in general.
imulq w, d0
movq $4294968273, a
mulq d0
subq d, d1
sbbq c, c
imulq w, d1
movq $4294968273, a
mulq d1
negq c
sbbq d, d2
sbbq c, c
imulq w, d2
movq $4294968273, a
mulq d2
negq c
sbbq d, d3
sbbq c, c
imulq w, d3
movq $4294968273, a
mulq d3
// Take an AND of the four cofactor digits, re-using the w variable.
// We hope this will interleave nicely with the computation sequence
// above but don't want to use other registers explicitly, so put
// it all together in a block.
movq d0, w
andq d1, w
andq d2, w
andq d3, w
// Finish propagating carry through new top part
xorq a, a
negq c
sbbq d, d0
sbbq a, d1
sbbq a, d2
sbbq a, d3
// The result thus far is z = (x + q * p_256k1) / 2^256. Note that
// z < p_256k1 <=> x < (2^256 - q) * p_256k1, and since
// x < 2^256 < 2 * p_256k1, we have that *if* q < 2^256 - 1 then
// z < p_256k1. Conversely if q = 2^256 - 1 then since
// x + q * p_256k1 == 0 (mod 2^256) we have x == p_256k1 (mod 2^256)
// and thus x = p_256k1, and z >= p_256k1 (in fact z = p_256k1).
// So in summary z < p_256k1 <=> ~(q = 2^256 - 1) <=> ~(x = p_256k1).
// and hence iff q is all 1s, or equivalently dd is all 1s, we
// correct by subtracting p_256k1 to get 0. Since this is only one
// case we compute the result more explicitly rather than doing
// arithmetic with carry propagation.
movq $4294968273, d
addq d0, d
addq $1, w
cmovzq d, d0
cmovzq a, d1
cmovzq a, d2
cmovzq a, d3
// write back and return
movq d0, (z)
movq d1, 8(z)
movq d2, 16(z)
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_deamont_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_mul_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply modulo p_256k1, z := (x * y) mod p_256k1
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_mul_p256k1_alt(uint64_t z[static 4],
// const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p256k1_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p256k1_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p256k1_alt)
.text
// These two already arrive in the right registers, so no copying needed
#define z %rdi
#define x %rsi
// Copied in or set up
#define y %rcx
// Re-use input pointers later for constant and top carry
#define d %rsi
#define c %rcx
// Macro for the key "multiply and add to (c,h,l)" step
#define combadd(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A minutely shorter form for when c = 0 initially
#define combadz(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq c, c
// A short form where we don't expect a top carry
#define combads(h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h
S2N_BN_SYMBOL(bignum_mul_p256k1_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save more registers to play with
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Copy y into a safe register to start with
movq %rdx, y
// Start the window as [%r10;%r9;%r8] with 00 product
movq (x), %rax
mulq (y)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
// Column 1
xorq %r11, %r11
combads(%r10,%r9,(x),8(y))
combadd(%r11,%r10,%r9,8(x),(y))
// Column 2
xorq %r12, %r12
combadz(%r12,%r11,%r10,(x),16(y))
combadd(%r12,%r11,%r10,8(x),8(y))
combadd(%r12,%r11,%r10,16(x),(y))
// Column 3
xorq %r13, %r13
combadz(%r13,%r12,%r11,(x),24(y))
combadd(%r13,%r12,%r11,8(x),16(y))
combadd(%r13,%r12,%r11,16(x),8(y))
combadd(%r13,%r12,%r11,24(x),(y))
// Column 4
xorq %r14, %r14
combadz(%r14,%r13,%r12,8(x),24(y))
combadd(%r14,%r13,%r12,16(x),16(y))
combadd(%r14,%r13,%r12,24(x),8(y))
// Column 5
xorq %r15, %r15
combadz(%r15,%r14,%r13,16(x),24(y))
combadd(%r15,%r14,%r13,24(x),16(y))
// Final work for columns 6 and 7
movq 24(x), %rax
mulq 24(y)
addq %rax, %r14
adcq %rdx, %r15
// Now we have the full 8-digit product 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// and this is == 4294968273 * h + l (mod p_256k1)
movq $4294968273, d
movq %r12, %rax
mulq d
addq %rax, %r8
adcq %rdx, %r9
sbbq c, c
movq %r13, %rax
mulq d
subq c, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq c, c
movq %r14, %rax
mulq d
subq c, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq c, c
movq %r15, %rax
mulq d
subq c, %rdx
xorq c, c
addq %rax, %r11
movq %rdx, %r12
adcq c, %r12
// Now we have reduced to 5 digits, 2^256 * h + l = [%r12,%r11,%r10,%r9,%r8]
// Use q = h + 1 as the initial quotient estimate, either right or 1 too big.
leaq 1(%r12), %rax
mulq d
addq %rax, %r8
adcq %rdx, %r9
adcq c, %r10
adcq c, %r11
// Now the effective answer is 2^256 * (CF - 1) + [%r11,%r10,%r9,%r8]
// So we correct if CF = 0 by subtracting 4294968273, i.e. by
// adding p_256k1 to the "full" answer
sbbq %rax, %rax
notq %rax
andq d, %rax
subq %rax, %r8
sbbq c, %r9
sbbq c, %r10
sbbq c, %r11
// Write everything back
movq %r8, (z)
movq %r9, 8(z)
movq %r10, 16(z)
movq %r11, 24(z)
// Restore registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mul_p256k1_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_sub_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Subtract modulo p_256k1, z := (x - y) mod p_256k1
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_sub_p256k1(uint64_t z[static 4], const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_p256k1)
.text
#define z %rdi
#define x %rsi
#define y %rdx
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
#define zero %rax
#define zeroe %eax
#define c %rcx
S2N_BN_SYMBOL(bignum_sub_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Zero a register first
xorl zeroe, zeroe
// Load and subtract the two inputs as [d3;d2;d1;d0] = x - y (modulo 2^256)
movq (x), d0
subq (y), d0
movq 8(x), d1
sbbq 8(y), d1
movq 16(x), d2
sbbq 16(y), d2
movq 24(x), d3
sbbq 24(y), d3
// Now if x < y we want to add back p_256k1, which staying within 4 digits
// means subtracting 4294968273, since p_256k1 = 2^256 - 4294968273.
// Let c be that constant 4294968273 when x < y, zero otherwise.
movq $4294968273, c
cmovncq zero, c
// Now correct by adding masked p_256k1, i.e. subtracting c, and write back
subq c, d0
movq d0, (z)
sbbq zero, d1
movq d1, 8(z)
sbbq zero, d2
movq d2, 16(z)
sbbq zero, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_sub_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/secp256k1_jdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on SECG curve secp256k1 in Jacobian coordinates
//
// extern void secp256k1_jdouble(uint64_t p3[static 12],
// const uint64_t p1[static 12]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input point are fully
// reduced mod p_256k1 and that the z coordinate is not zero.
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jdouble)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jdouble)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1, which is true when the
// arguments come in initially and is not disturbed throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define x_2 (NUMSIZE*0)(%rsp)
#define y_2 (NUMSIZE*1)(%rsp)
#define d (NUMSIZE*2)(%rsp)
#define tmp (NUMSIZE*3)(%rsp)
#define x_4 (NUMSIZE*4)(%rsp)
#define y_4 (NUMSIZE*6)(%rsp)
#define dx2 (NUMSIZE*8)(%rsp)
#define xy2 (NUMSIZE*10)(%rsp)
#define NSPACE NUMSIZE*12
// Corresponds exactly to bignum_mul_p256k1
#define mul_p256k1(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rax, %r10 ; \
addq %rax, %r9 ; \
mulxq 0x10+P1, %rax, %r11 ; \
adcq %rax, %r10 ; \
mulxq 0x18+P1, %rax, %r12 ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x8+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rcx, %r13 ; \
xorl %ecx, %ecx ; \
movq 0x10+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq 0x18+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcxq %rcx, %r15 ; \
movabsq $0x1000003d1, %rdx ; \
xorl %ecx, %ecx ; \
mulxq %r12, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq %r13, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq %r14, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
adcxq %rcx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulxq %rax, %rax, %rbx ; \
addq %rax, %r8 ; \
adcq %rbx, %r9 ; \
adcq %rcx, %r10 ; \
adcq %rcx, %r11 ; \
cmovbq %rcx, %rdx ; \
subq %rdx, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rcx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Corresponds exactly to bignum_sqr_p256k1
#define sqr_p256k1(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ebx, %ebx ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rbx, %r13 ; \
adoxq %rbx, %r14 ; \
adcq %rbx, %r14 ; \
xorl %ebx, %ebx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rbx, %r15 ; \
adoxq %rbx, %r15 ; \
movabsq $0x1000003d1, %rdx ; \
xorl %ebx, %ebx ; \
mulxq %r12, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq %r13, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq %r14, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
adcxq %rbx, %r12 ; \
leaq 0x1(%r12), %rax ; \
mulxq %rax, %rax, %rcx ; \
addq %rax, %r8 ; \
adcq %rcx, %r9 ; \
adcq %rbx, %r10 ; \
adcq %rbx, %r11 ; \
sbbq %rax, %rax ; \
notq %rax; \
andq %rdx, %rax ; \
subq %rax, %r8 ; \
sbbq %rbx, %r9 ; \
sbbq %rbx, %r10 ; \
sbbq %rbx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0
// Rough versions producing 5-word results
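// These mirror the macros above but stop before the q = h + 1
// correction, storing the small (roughly 33-bit) fifth digit at
// 0x20+P0 instead of folding it back in.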
#define roughmul_p256k1(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rax, %r10 ; \
addq %rax, %r9 ; \
mulxq 0x10+P1, %rax, %r11 ; \
adcq %rax, %r10 ; \
mulxq 0x18+P1, %rax, %r12 ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x8+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rcx, %r13 ; \
xorl %ecx, %ecx ; \
movq 0x10+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq 0x18+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcxq %rcx, %r15 ; \
movabsq $0x1000003d1, %rdx ; \
xorl %ecx, %ecx ; \
mulxq %r12, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq %r13, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq %r14, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
adcxq %rcx, %r12 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0 ; \
movq %r12, 0x20+P0
#define roughsqr_p256k1(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ebx, %ebx ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rbx, %r13 ; \
adoxq %rbx, %r14 ; \
adcq %rbx, %r14 ; \
xorl %ebx, %ebx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rbx, %r15 ; \
adoxq %rbx, %r15 ; \
movabsq $0x1000003d1, %rdx ; \
xorl %ebx, %ebx ; \
mulxq %r12, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq %r13, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq %r14, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq %r15, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
adcxq %rbx, %r12 ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0 ; \
movq %r12, 0x20+P0
// Weak doubling operation, staying in 4 digits but not in general
// fully normalizing
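// Concretely, the shift chain below doubles the 256-bit value and, when a
// bit is shifted out at the top, adds back 0x1000003d1 = 2^256 mod p_256k1,
// so the result is congruent to 2 * P1 but only weakly reduced. A hedged C
// sketch of the same idea (illustrative only, not part of the original
// source; assumes GCC/Clang unsigned __int128):
//
//   #include <stdint.h>
//   static void weakdouble(uint64_t z[4], const uint64_t x[4]) {
//       uint64_t top = x[3] >> 63;               // bit shifted out of 2^256
//       for (int i = 3; i > 0; i--)
//           z[i] = (x[i] << 1) | (x[i - 1] >> 63);
//       z[0] = x[0] << 1;
//       unsigned __int128 t =
//           (unsigned __int128)z[0] + (top ? 0x1000003d1u : 0u);
//       z[0] = (uint64_t)t;                      // add 2^256 mod p if needed
//       for (int i = 1; i < 4; i++) {            // propagate the carry
//           t = (unsigned __int128)z[i] + (uint64_t)(t >> 64);
//           z[i] = (uint64_t)t;
//       }
//   }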
#define weakdouble_p256k1(P0,P1) \
movq 24+P1, %r11 ; \
movq 16+P1, %r10 ; \
movq $0x1000003d1, %rax ; \
xorq %rdx, %rdx ; \
shldq $1, %r10, %r11 ; \
cmovncq %rdx, %rax ; \
movq 8+P1, %r9 ; \
shldq $1, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $1, %r8, %r9 ; \
shlq $1, %r8 ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rdx, %r10 ; \
adcq %rdx, %r11 ; \
movq %r8, P0 ; \
movq %r9, 8+P0 ; \
movq %r10, 16+P0 ; \
movq %r11, 24+P0
// P0 = C * P1 - D * P2 with 5-word inputs P1 and P2
// Only used here with C = 12, D = 9, but could be used more generally.
// We actually compute C * P1 + D * (2^33 * p_256k1 - P2)
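// Since the 5-word inputs produced by the rough operations above have a top
// word bounded by roughly 2^32, P2 < 2^33 * p_256k1, so the difference is
// nonnegative, and modulo p_256k1 it equals -P2. The five constants loaded
// below are the digits of 2^33 * p_256k1 = 2^289 - 2^65 - 977 * 2^33; for
// instance the bottom word is -(977 << 33) mod 2^64 = 0xfffff85e00000000,
// as this illustrative C check shows (hypothetical helper, not part of the
// original source):
//
//   #include <stdint.h>
//   #include <assert.h>
//   static void check_low_digit_of_33p(void) {
//       assert((uint64_t)(0 - (977ull << 33)) == 0xfffff85e00000000u);
//   }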
#define cmsub_p256k1(P0,C,P1,D,P2) \
movq $0xfffff85e00000000, %r8 ; \
subq P2, %r8 ; \
movq $0xfffffffffffffffd, %r9 ; \
sbbq 8+P2, %r9 ; \
movq $0xffffffffffffffff, %r10 ; \
sbbq 16+P2, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
sbbq 24+P2, %r11 ; \
movq $0x00000001ffffffff, %r12 ; \
sbbq 32+P2, %r12 ; \
movq $D, %rdx ; \
mulxq %r8, %r8, %rax ; \
mulxq %r9, %r9, %rcx ; \
addq %rax, %r9 ; \
mulxq %r10, %r10, %rax ; \
adcq %rcx, %r10 ; \
mulxq %r11, %r11, %rcx ; \
adcq %rax, %r11 ; \
mulxq %r12, %r12, %rax ; \
adcq %rcx, %r12 ; \
movq $C, %rdx ; \
xorq %rbx, %rbx ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq 8+P1, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq 16+P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 24+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
mulxq 32+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
leaq 0x1(%r12), %rax ; \
movq $0x1000003d1, %rcx ; \
mulq %rcx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rbx, %r10 ; \
adcq %rbx, %r11 ; \
cmovbq %rbx, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rbx, %r9 ; \
movq %r9, 8+P0 ; \
sbbq %rbx, %r10 ; \
movq %r10, 16+P0 ; \
sbbq %rbx, %r11 ; \
movq %r11, 24+P0
// P0 = 3 * P1 - 8 * P2 with 5-digit P1 and P2
// We actually compute 3 * P1 + (2^33 * p_256k1 - P2) << 3
#define cmsub38_p256k1(P0,P1,P2) \
movq $0xfffff85e00000000, %r8 ; \
subq P2, %r8 ; \
movq $0xfffffffffffffffd, %r9 ; \
sbbq 8+P2, %r9 ; \
movq $0xffffffffffffffff, %r10 ; \
sbbq 16+P2, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
sbbq 24+P2, %r11 ; \
movq $0x00000001ffffffff, %r12 ; \
sbbq 32+P2, %r12 ; \
shldq $3, %r11, %r12 ; \
shldq $3, %r10, %r11 ; \
shldq $3, %r9, %r10 ; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
movq $3, %rdx ; \
xorq %rbx, %rbx ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq 8+P1, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq 16+P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 24+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
mulxq 32+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
leaq 0x1(%r12), %rax ; \
movq $0x1000003d1, %rcx ; \
mulq %rcx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %rbx, %r10 ; \
adcq %rbx, %r11 ; \
cmovbq %rbx, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rbx, %r9 ; \
movq %r9, 8+P0 ; \
sbbq %rbx, %r10 ; \
movq %r10, 16+P0 ; \
sbbq %rbx, %r11 ; \
movq %r11, 24+P0
// P0 = 4 * P1 - P2 with 5-digit P1, 4-digit P2 and result.
// This is done by direct subtraction of P2 since the method
// in bignum_cmul_p256k1 etc. for quotient estimation still
// works when the value to be reduced is negative, as
// long as it is > -p_256k1, which is the case here.
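// The shared reduction tail uses the quotient estimate q = h + 1 for a
// value 2^256 * h + l: adding q * 0x1000003d1 and discarding the 2^256 bit
// either lands in range (carry out) or overshoots by exactly one constant,
// which the cmovb/sub chain takes back. An illustrative C sketch (not part
// of the original source; assumes GCC/Clang unsigned __int128):
//
//   #include <stdint.h>
//   // Reduce 2^256*h + l modulo p_256k1 = 2^256 - c with c = 0x1000003d1,
//   // assuming the value is in a suitable range (e.g. h small, or slightly
//   // negative in two's complement but > -p_256k1).
//   static void reduce_p256k1(uint64_t l[4], uint64_t h) {
//       const uint64_t c = 0x1000003d1u;
//       unsigned __int128 t = (unsigned __int128)(h + 1) * c; // q = h + 1
//       for (int i = 0; i < 4; i++) {
//           t += l[i];
//           l[i] = (uint64_t)t;
//           t >>= 64;
//       }
//       if ((uint64_t)t == 0) {           // no carry: one c too many added
//           uint64_t borrow = l[0] < c;
//           l[0] -= c;
//           for (int i = 1; i < 4; i++) {
//               uint64_t nb = l[i] < borrow;
//               l[i] -= borrow;
//               borrow = nb;
//           }
//       }
//   }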
#define cmsub41_p256k1(P0,P1,P2) \
movq 32+P1, %r12 ; \
movq 24+P1, %r11 ; \
shldq $2, %r11, %r12 ; \
movq 16+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
subq P2, %r8 ; \
sbbq 8+P2, %r9 ; \
sbbq 16+P2, %r10 ; \
sbbq 24+P2, %r11 ; \
sbbq $0, %r12 ; \
leaq 0x1(%r12), %rax ; \
movq $0x1000003d1, %rcx ; \
mulq %rcx ; \
xorq %rbx, %rbx ; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
adcq $0x0, %r11 ; \
cmovbq %rbx, %rcx ; \
subq %rcx, %r8 ; \
movq %r8, P0 ; \
sbbq %rbx, %r9 ; \
movq %r9, 8+P0 ; \
sbbq %rbx, %r10 ; \
movq %r10, 16+P0 ; \
sbbq %rbx, %r11 ; \
movq %r11, 24+P0
S2N_BN_SYMBOL(secp256k1_jdouble):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Main sequence of operations
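// With A = x^2 and B = y^2 the steps below compute, modulo p_256k1,
//
//     d  = 12 * x * B - 9 * A^2
//     x' = 4 * x * B - d          ( = 9 * A^2 - 8 * x * B )
//     y' = 3 * A * d - 8 * B^2
//     z' = 2 * y * z
//
// a standard Jacobian doubling for a curve with a = 0 (y^2 = x^3 + 7),
// arranged so that the bulky intermediates can stay in rough 5-word form.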
// y_2 = y^2
sqr_p256k1(y_2,y_1)
// x_2 = x^2
sqr_p256k1(x_2,x_1)
// tmp = 2 * y_1 (in 4 words but not fully normalized)
weakdouble_p256k1(tmp,y_1)
// xy2 = x * y^2 (5-digit partially reduced)
// x_4 = x^4 (5-digit partially reduced)
roughmul_p256k1(xy2,x_1,y_2)
roughsqr_p256k1(x_4,x_2)
// z_3 = 2 * y_1 * z_1
mul_p256k1(z_3,z_1,tmp)
// d = 12 * xy2 - 9 * x_4
cmsub_p256k1(d,12,xy2,9,x_4)
// y4 = y2^2 (5-digit partially reduced)
roughsqr_p256k1(y_4,y_2)
// dx2 = d * x_2 (5-digit partially reduced)
roughmul_p256k1(dx2,x_2,d)
// x_3 = 4 * xy2 - d
cmsub41_p256k1(x_3,xy2,d)
// y_3 = 3 * dx2 - 8 * y_4
cmsub38_p256k1(y_3,dx2,y_4)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(secp256k1_jdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// ----------------------------------------------------------------------------
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/secp256k1/bignum_montmul_p256k1.S
// ----------------------------------------------------------------------------
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^256) mod p_256k1
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_montmul_p256k1(uint64_t z[static 4],
// const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Does z := (2^{-256} * x * y) mod p_256k1, assuming that the inputs x and y
// satisfy x * y <= 2^256 * p_256k1 (in particular this is true if we are in
// the "usual" case x < p_256k1 and y < p_256k1).
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p256k1)
.text
// These are actually right: the standard ABI already passes z and x here
#define z %rdi
#define x %rsi
// Copied in or set up
#define y %rcx
// A zero register
#define zero %rbp
#define zeroe %ebp
// Also used for multiplicative inverse in second part
#define w %rbp
// mulpadd(high,low,m) adds %rdx * m to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rbx as temporaries.
#define mulpadd(high,low,m) \
mulxq m, %rax, %rbx ; \
adcxq %rax, low ; \
adoxq %rbx, high
// mulpade(high,low,m) adds %rdx * m to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax as a temporary, assuming high created from scratch
// and that zero has value zero.
#define mulpade(high,low,m) \
mulxq m, %rax, high ; \
adcxq %rax, low ; \
adoxq zero, high
S2N_BN_SYMBOL(bignum_montmul_p256k1):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Copy y into a safe register to start with
movq %rdx, y
// Zero a register, which also makes sure we don't get a fake carry-in
xorl zeroe, zeroe
// Do the zeroth row, which is a bit different
movq (y), %rdx
mulxq (x), %r8, %r9
mulxq 8(x), %rax, %r10
addq %rax, %r9
mulxq 16(x), %rax, %r11
adcq %rax, %r10
mulxq 24(x), %rax, %r12
adcq %rax, %r11
adcq zero, %r12
// Add row 1
xorl zeroe, zeroe
movq 8(y), %rdx
mulpadd(%r10,%r9,(x))
mulpadd(%r11,%r10,8(x))
mulpadd(%r12,%r11,16(x))
mulpade(%r13,%r12,24(x))
adcxq zero, %r13
// Add row 2
xorl zeroe, zeroe
movq 16(y), %rdx
mulpadd(%r11,%r10,(x))
mulpadd(%r12,%r11,8(x))
mulpadd(%r13,%r12,16(x))
mulpade(%r14,%r13,24(x))
adcxq zero, %r14
// Add row 3
xorl zeroe, zeroe
movq 24(y), %rdx
mulpadd(%r12,%r11,(x))
mulpadd(%r13,%r12,8(x))
mulpadd(%r14,%r13,16(x))
mulpade(%r15,%r14,24(x))
adcxq zero, %r15
// Now we have the full 8-digit product 2^256 * h + l where
// h = [%r15,%r14,%r13,%r12] and l = [%r11,%r10,%r9,%r8]
// Do Montgomery reductions, now using %rcx as a carry-saver.
// A direct carry chain is possible using mulx exclusively, but it
// requires more moves and overall seems to have lower performance.
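// For p_256k1 = 2^256 - c with c = 4294968273, one word-level Montgomery
// step picks m = x0 * w mod 2^64 so that x + m * p_256k1 has a zero bottom
// word; since m * p_256k1 = m * 2^256 - m * c and the low half of m * c
// cancels x0 exactly, only the high half of m * c is subtracted (with
// borrow) and m itself is remembered for addition at the 2^256 position,
// which is what [%r11;%r10;%r9;%r8] accumulate below. An illustrative C
// sketch of one step (not part of the original source; assumes GCC/Clang
// unsigned __int128):
//
//   #include <stdint.h>
//   static uint64_t montstep(uint64_t *x1, uint64_t x0) {
//       const uint64_t w = 0xd838091dd2253531u;  // -p_256k1^-1 mod 2^64
//       const uint64_t c = 0x1000003d1u;         // 4294968273 = 2^32 + 977
//       uint64_t m = x0 * w;                     // zeroes the bottom word
//       unsigned __int128 mc = (unsigned __int128)m * c;
//       *x1 -= (uint64_t)(mc >> 64);             // borrow chain elided here
//       return m;                                // add this at 2^256 later
//   }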
movq $0xd838091dd2253531, w
movq $4294968273, %rbx
// Montgomery reduce row 0
movq %rbx, %rax
imulq w, %r8
mulq %r8
subq %rdx, %r9
sbbq %rcx, %rcx
// Montgomery reduce row 1
movq %rbx, %rax
imulq w, %r9
mulq %r9
negq %rcx
sbbq %rdx, %r10
sbbq %rcx, %rcx
// Montgomery reduce row 2
movq %rbx, %rax
imulq w, %r10
mulq %r10
negq %rcx
sbbq %rdx, %r11
sbbq %rcx, %rcx
// Montgomery reduce row 3
movq %rbx, %rax
imulq w, %r11
mulq %r11
negq %rcx
// Now [%r15,%r14,%r13,%r12] := [%r15,%r14,%r13,%r12] + [%r11,%r10,%r9,%r8] - (%rdx + CF)
sbbq %rdx, %r8
sbbq $0, %r9
sbbq $0, %r10
sbbq $0, %r11
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
sbbq w, w
// Let b be the top carry captured just above as w = (2^64-1) * b
// Now if [b,%r15,%r14,%r13,%r12] >= p_256k1, subtract p_256k1, i.e. add 4294968273
// and either way throw away the top word. [b,%r15,%r14,%r13,%r12] - p_256k1 =
// [(b - 1),%r15,%r14,%r13,%r12] + 4294968273. If [%r15,%r14,%r13,%r12] + 4294968273
// gives carry flag CF then the >= comparison holds iff the top word
// b - 1 + CF >= 0, which is equivalent to b \/ CF, and so to
// (2^64-1) * b + (2^64 - 1) + CF >= 2^64
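// For example, if b = 0 and adding 4294968273 produces no carry below, then
// [%r15;%r14;%r13;%r12] was already < p_256k1 and the unadjusted value is
// kept; in every other case (b = 1 or CF = 1) the adjusted sum is selected.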
movq %r12, %r8
addq %rbx, %r8
movq %r13, %r9
adcq $0, %r9
movq %r14, %r10
adcq $0, %r10
movq %r15, %r11
adcq $0, %r11
adcq $-1, w
// Write everything back
cmovcq %r8, %r12
movq %r12, (z)
cmovcq %r9, %r13
movq %r13, 8(z)
cmovcq %r10, %r14
movq %r14, 16(z)
cmovcq %r11, %r15
movq %r15, 24(z)
// Restore registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p256k1)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// ----------------------------------------------------------------------------
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_half_sm2.S
// ----------------------------------------------------------------------------
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Halve modulo p_sm2, z := (x / 2) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_half_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
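// The trick used below: if x is even just shift right; if x is odd add
// p_sm2 first, which makes the sum even, so (x + p_sm2) >> 1 is exact. A
// hedged C sketch with a hypothetical generic helper (not part of the
// original source; assumes GCC/Clang unsigned __int128):
//
//   #include <stdint.h>
//   static void half_mod(uint64_t z[4], const uint64_t x[4],
//                        const uint64_t p[4]) {  // p odd, x < p
//       uint64_t mask = 0 - (x[0] & 1);          // all ones iff x is odd
//       uint64_t s[4];
//       unsigned __int128 t = 0;
//       for (int i = 0; i < 4; i++) {            // s = x + (x odd ? p : 0)
//           t += x[i];
//           t += p[i] & mask;
//           s[i] = (uint64_t)t;
//           t >>= 64;
//       }
//       uint64_t carry = (uint64_t)t;            // bit 256 of the sum
//       for (int i = 0; i < 3; i++)              // logical shift right by 1
//           z[i] = (s[i] >> 1) | (s[i + 1] << 63);
//       z[3] = (s[3] >> 1) | (carry << 63);
//   }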
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_half_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_sm2)
.text
#define z %rdi
#define x %rsi
#define a %rax
#define d0 %rcx
#define d1 %rdx
#define d2 %r8
#define d3 %r9
#define d0short %ecx
#define d1short %edx
S2N_BN_SYMBOL(bignum_half_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load lowest digit and get a mask for its lowest bit in d0
movq (x), a
movl $1, d0short
andq a, d0
negq d0
// Create a masked version of p_sm2
movq $0xffffffff00000000, d1
andq d0, d1
movq d0, d2
movq $0xfffffffeffffffff, d3
andq d0, d3
// Perform addition with masked p_sm2. Catch the carry in a, as a bitmask
// for convenience though we only use its LSB below with SHRD
addq a, d0
adcq 8(x), d1
adcq 16(x), d2
adcq 24(x), d3
sbbq a, a
// Shift right, pushing the carry back down, and store back
shrdq $1, d1, d0
movq d0, (z)
shrdq $1, d2, d1
movq d1, 8(z)
shrdq $1, d3, d2
movq d2, 16(z)
shrdq $1, a, d3
movq d3, 24(z)
// Return
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_half_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// ----------------------------------------------------------------------------
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjscalarmul.S
// ----------------------------------------------------------------------------
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for GM/T 0003-2012 curve SM2
// Input scalar[4], point[12]; output res[12]
//
// extern void sm2_montjscalarmul
// (uint64_t res[static 12],
// const uint64_t scalar[static 4],
// const uint64_t point[static 12]);
//
// This function is a variant of its affine point version sm2_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^256 * x) mod p_sm2 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the GM/T 0003-2012
// elliptic curve SM2, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of the group order n_sm2) then the output is guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point
// Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjscalarmul)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjscalarmul)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjscalarmul)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 32
// Intermediate variables on the stack. Uppercase syntactic variants
// make x86_att version simpler to generate.
#define SCALARB (0*NUMSIZE)
#define scalarb (0*NUMSIZE)(%rsp)
#define ACC (1*NUMSIZE)
#define acc (1*NUMSIZE)(%rsp)
#define TABENT (4*NUMSIZE)
#define tabent (4*NUMSIZE)(%rsp)
#define TAB (7*NUMSIZE)
#define tab (7*NUMSIZE)(%rsp)
#define res (31*NUMSIZE)(%rsp)
#define NSPACE 32*NUMSIZE
// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.
#define selectblock(I) \
cmpq $I, %rdi ; \
cmovzq TAB+96*(I-1)(%rsp), %rax ; \
cmovzq TAB+96*(I-1)+8(%rsp), %rbx ; \
cmovzq TAB+96*(I-1)+16(%rsp), %rcx ; \
cmovzq TAB+96*(I-1)+24(%rsp), %rdx ; \
cmovzq TAB+96*(I-1)+32(%rsp), %r8 ; \
cmovzq TAB+96*(I-1)+40(%rsp), %r9 ; \
cmovzq TAB+96*(I-1)+48(%rsp), %r10 ; \
cmovzq TAB+96*(I-1)+56(%rsp), %r11 ; \
cmovzq TAB+96*(I-1)+64(%rsp), %r12 ; \
cmovzq TAB+96*(I-1)+72(%rsp), %r13 ; \
cmovzq TAB+96*(I-1)+80(%rsp), %r14 ; \
cmovzq TAB+96*(I-1)+88(%rsp), %r15
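// Each use of this macro touches every table entry and keeps the selection
// in registers, so the memory access pattern is independent of the secret
// index. An illustrative C analogue of the lookup (hypothetical names, not
// part of the original source; the j == idx comparison stands in for the
// cmp/cmovz pair):
//
//   #include <stdint.h>
//   // Select entry idx (1..8) from tab[8][12]; idx == 0 selects nothing,
//   // leaving the all-zero output (the point-at-infinity encoding).
//   static void select_entry(uint64_t out[12], const uint64_t tab[8][12],
//                            uint64_t idx) {
//       for (int i = 0; i < 12; i++) out[i] = 0;
//       for (uint64_t j = 1; j <= 8; j++) {
//           uint64_t mask = 0 - (uint64_t)(j == idx);
//           for (int i = 0; i < 12; i++)
//               out[i] |= tab[j - 1][i] & mask;
//       }
//   }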
S2N_BN_SYMBOL(sm2_montjscalarmul):
CFI_START
_CET_ENDBR
// The Windows version literally calls the standard ABI version.
// This simplifies the proofs since subroutine offsets are fixed.
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
CFI_CALL(Lsm2_montjscalarmul_standard)
CFI_POP(%rsi)
CFI_POP(%rdi)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjscalarmul)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_standard)
Lsm2_montjscalarmul_standard:
CFI_START
#endif
// Real start of the standard ABI code.
CFI_PUSH(%r15)
CFI_PUSH(%r14)
CFI_PUSH(%r13)
CFI_PUSH(%r12)
CFI_PUSH(%rbp)
CFI_PUSH(%rbx)
CFI_DEC_RSP(NSPACE)
// Preserve the "res" and "point" input arguments. We load and process the
// scalar immediately so we don't bother preserving that input argument.
// Also, "point" is only needed early on and so its register gets re-used.
movq %rdx, %rbx
movq %rdi, res
// Load the digits of group order n_sm2 = [%r15;%r14;%r13;%r12]
movq $0x53bbf40939d54123, %r12
movq $0x7203df6b21c6052b, %r13
movq $0xffffffffffffffff, %r14
movq $0xfffffffeffffffff, %r15
// First, reduce the input scalar mod n_sm2, i.e. conditionally subtract n_sm2
movq (%rsi), %r8
subq %r12, %r8
movq 8(%rsi), %r9
sbbq %r13, %r9
movq 16(%rsi), %r10
sbbq %r14, %r10
movq 24(%rsi), %r11
sbbq %r15, %r11
cmovcq (%rsi), %r8
cmovcq 8(%rsi), %r9
cmovcq 16(%rsi), %r10
cmovcq 24(%rsi), %r11
// Now if the top bit of the reduced scalar is set, negate it mod n_sm2,
// i.e. do n |-> n_sm2 - n. Remember the sign in %rbp so we can
// correspondingly negate the point below.
subq %r8, %r12
sbbq %r9, %r13
sbbq %r10, %r14
sbbq %r11, %r15
movq %r11, %rbp
shrq $63, %rbp
cmovnzq %r12, %r8
cmovnzq %r13, %r9
cmovnzq %r14, %r10
cmovnzq %r15, %r11
// In either case then add the recoding constant 0x08888...888 to allow
// signed digits.
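// After the bias, each 4-bit window w of the scalar stands for the signed
// digit w - 8 in the range -8..+7 (the extra +8 per window is absorbed by
// the next window up), while the btc below adjusts the bias in the topmost
// window, which the initial selection consumes directly without the -8
// shift. An illustrative C sketch of decoding one digit as the main loop
// does (hypothetical helper, not part of the original source):
//
//   #include <stdint.h>
//   // Signed digit at bit position pos (a multiple of 4) of the recoded
//   // scalar n[4]; |digit| indexes the table, the sign selects negation.
//   static int64_t signed_digit(const uint64_t n[4], int pos) {
//       uint64_t w = (n[pos >> 6] >> (pos & 63)) & 15;
//       return (int64_t)w - 8;   // in -8..+7
//   }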
movq $0x8888888888888888, %rax
addq %rax, %r8
adcq %rax, %r9
adcq %rax, %r10
adcq %rax, %r11
btc $63, %r11
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
// Set the tab[0] table entry to the input point = 1 * P, except
// that we negate it if the top bit of the scalar was set. This
// negation takes care over the y = 0 case to maintain all the
// coordinates < p_sm2 throughout, even though triples (x,y,z)
// with y = 0 can only represent a point on the curve when z = 0
// and it represents the point at infinity regardless of x and y.
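// A hedged C sketch of this conditional negation (illustrative names, not
// part of the original source; assumes GCC/Clang unsigned __int128):
//
//   #include <stdint.h>
//   // Replace y by p - y when flag is set, except that y == 0 is kept
//   // as 0 so all coordinates stay strictly below p.
//   static void cneg_mod(uint64_t y[4], const uint64_t p[4], uint64_t flag) {
//       if ((y[0] | y[1] | y[2] | y[3]) == 0) flag = 0;
//       uint64_t t[4], borrow = 0;
//       for (int i = 0; i < 4; i++) {            // t = p - y
//           unsigned __int128 d =
//               (unsigned __int128)p[i] - y[i] - borrow;
//           t[i] = (uint64_t)d;
//           borrow = (uint64_t)(d >> 64) & 1;
//       }
//       for (int i = 0; i < 4; i++)
//           y[i] = flag ? t[i] : y[i];
//   }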
movq (%rbx), %rax
movq %rax, TAB(%rsp)
movq 8(%rbx), %rax
movq %rax, TAB+8(%rsp)
movq 16(%rbx), %rax
movq %rax, TAB+16(%rsp)
movq 24(%rbx), %rax
movq %rax, TAB+24(%rsp)
movq 32(%rbx), %r12
movq %r12, %rax
movq 40(%rbx), %r13
orq %r13, %rax
movq 48(%rbx), %r14
movq %r14, %rcx
movq 56(%rbx), %r15
orq %r15, %rcx
orq %rcx, %rax
cmovzq %rax, %rbp
xorl %r11d, %r11d
movl $0x00000000ffffffff, %r9d
notq %r11
movq %r11, %r8
movq %r11, %r10
xorq %r8, %r9
btr $32, %r11
subq %r12, %r8
sbbq %r13, %r9
sbbq %r14, %r10
sbbq %r15, %r11
testq %rbp, %rbp
cmovzq %r12, %r8
cmovzq %r13, %r9
cmovzq %r14, %r10
cmovzq %r15, %r11
movq %r8, TAB+32(%rsp)
movq %r9, TAB+40(%rsp)
movq %r10, TAB+48(%rsp)
movq %r11, TAB+56(%rsp)
movq 64(%rbx), %rax
movq %rax, TAB+64(%rsp)
movq 72(%rbx), %rax
movq %rax, TAB+72(%rsp)
movq 80(%rbx), %rax
movq %rax, TAB+80(%rsp)
movq 88(%rbx), %rax
movq %rax, TAB+88(%rsp)
// Compute and record tab[1] = 2 * P, ..., tab[7] = 8 * P
leaq TAB+96*1(%rsp), %rdi
leaq TAB(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
leaq TAB+96*2(%rsp), %rdi
leaq TAB+96*1(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lsm2_montjscalarmul_sm2_montjadd)
leaq TAB+96*3(%rsp), %rdi
leaq TAB+96*1(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
leaq TAB+96*4(%rsp), %rdi
leaq TAB+96*3(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lsm2_montjscalarmul_sm2_montjadd)
leaq TAB+96*5(%rsp), %rdi
leaq TAB+96*2(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
leaq TAB+96*6(%rsp), %rdi
leaq TAB+96*5(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lsm2_montjscalarmul_sm2_montjadd)
leaq TAB+96*7(%rsp), %rdi
leaq TAB+96*3(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
// Set up accumulator as table entry for top 4 bits (constant-time indexing)
movq SCALARB+24(%rsp), %rdi
shrq $60, %rdi
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
movq %rax, ACC(%rsp)
movq %rbx, ACC+8(%rsp)
movq %rcx, ACC+16(%rsp)
movq %rdx, ACC+24(%rsp)
movq %r8, ACC+32(%rsp)
movq %r9, ACC+40(%rsp)
movq %r10, ACC+48(%rsp)
movq %r11, ACC+56(%rsp)
movq %r12, ACC+64(%rsp)
movq %r13, ACC+72(%rsp)
movq %r14, ACC+80(%rsp)
movq %r15, ACC+88(%rsp)
// Main loop over size-4 bitfield
movl $252, %ebp
Lsm2_montjscalarmul_mainloop:
subq $4, %rbp
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjdouble)
movq %rbp, %rax
shrq $6, %rax
movq (%rsp,%rax,8), %rdi
movq %rbp, %rcx
shrq %cl, %rdi
andq $15, %rdi
subq $8, %rdi
sbbq %rsi, %rsi // %rsi = sign of digit (-1 = negative)
xorq %rsi, %rdi
subq %rsi, %rdi // %rdi = absolute value of digit
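// For example, a raw window of 3 gives 3 - 8 = -5: the sbb makes %rsi the
// all-ones mask and the xor/sub pair yields %rdi = 5, with the table entry
// negated afterwards; a window of 13 gives +5 with %rsi = 0 and no negation.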
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
// Store it to "tabent" with the y coordinate optionally negated
// Again, do it carefully to give coordinates < p_sm2 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
movq %rax, TABENT(%rsp)
movq %rbx, TABENT+8(%rsp)
movq %rcx, TABENT+16(%rsp)
movq %rdx, TABENT+24(%rsp)
movq %r12, TABENT+64(%rsp)
movq %r13, TABENT+72(%rsp)
movq %r14, TABENT+80(%rsp)
movq %r15, TABENT+88(%rsp)
xorl %r15d, %r15d
movq %r8, %rax
movl $0x00000000ffffffff, %r13d
orq %r9, %rax
notq %r15
movq %r10, %rcx
movq %r15, %r12
orq %r11, %rcx
movq %r15, %r14
xorq %r12, %r13
btr $32, %r15
orq %rcx, %rax
cmovzq %rax, %rsi
subq %r8, %r12
sbbq %r9, %r13
sbbq %r10, %r14
sbbq %r11, %r15
testq %rsi, %rsi
cmovnzq %r12, %r8
cmovnzq %r13, %r9
cmovnzq %r14, %r10
cmovnzq %r15, %r11
movq %r8, TABENT+32(%rsp)
movq %r9, TABENT+40(%rsp)
movq %r10, TABENT+48(%rsp)
movq %r11, TABENT+56(%rsp)
leaq TABENT(%rsp), %rdx
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_sm2_montjadd)
testq %rbp, %rbp
jne Lsm2_montjscalarmul_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
movq res, %rdi
movq ACC(%rsp), %rax
movq %rax, (%rdi)
movq ACC+8(%rsp), %rax
movq %rax, 8(%rdi)
movq ACC+16(%rsp), %rax
movq %rax, 16(%rdi)
movq ACC+24(%rsp), %rax
movq %rax, 24(%rdi)
movq ACC+32(%rsp), %rax
movq %rax, 32(%rdi)
movq ACC+40(%rsp), %rax
movq %rax, 40(%rdi)
movq ACC+48(%rsp), %rax
movq %rax, 48(%rdi)
movq ACC+56(%rsp), %rax
movq %rax, 56(%rdi)
movq ACC+64(%rsp), %rax
movq %rax, 64(%rdi)
movq ACC+72(%rsp), %rax
movq %rax, 72(%rdi)
movq ACC+80(%rsp), %rax
movq %rax, 80(%rdi)
movq ACC+88(%rsp), %rax
movq %rax, 88(%rdi)
// Restore stack and registers and return
CFI_INC_RSP(NSPACE)
CFI_POP(%rbx)
CFI_POP(%rbp)
CFI_POP(%r12)
CFI_POP(%r13)
CFI_POP(%r14)
CFI_POP(%r15)
CFI_RET
#if WINDOWS_ABI
S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_standard)
#else
S2N_BN_SIZE_DIRECTIVE(sm2_montjscalarmul)
#endif
// Local copies of subroutines, complete clones at the moment
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjadd)
Lsm2_montjscalarmul_sm2_montjadd:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(224)
movq %rdx, %rbp
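// What follows is the fully expanded straight-line sequence of Montgomery
// squarings, multiplications, additions and subtractions modulo p_sm2
// implementing a standard Jacobian point addition (compare sm2_montjadd);
// each reduction block repeats the same shl/shr/sub pattern, exploiting
// the sparse form p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1.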
movq 0x40(%rsi), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x48(%rsi), %r9, %r10
mulxq 0x58(%rsi), %r11, %r12
movq 0x50(%rsi), %rdx
mulxq 0x58(%rsi), %r13, %r14
xorl %ecx, %ecx
mulxq 0x40(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x58(%rsi), %rdx
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x48(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x50(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x58(%rsi), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
sbbq %rax, %rax
movabsq $0xffffffff00000000, %rbx
movq %rax, %rcx
andq %rax, %rbx
btr $0x20, %rcx
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rax, %r14
sbbq %rcx, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
movq 0x40(%rbp), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x48(%rbp), %r9, %r10
mulxq 0x58(%rbp), %r11, %r12
movq 0x50(%rbp), %rdx
mulxq 0x58(%rbp), %r13, %r14
xorl %ecx, %ecx
mulxq 0x40(%rbp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x48(%rbp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x58(%rbp), %rdx
mulxq 0x48(%rbp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x48(%rbp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x50(%rbp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x58(%rbp), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
sbbq %rax, %rax
movabsq $0xffffffff00000000, %rbx
movq %rax, %rcx
andq %rax, %rbx
btr $0x20, %rcx
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rax, %r14
sbbq %rcx, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
xorl %ecx, %ecx
movq 0x20(%rsi), %rdx
mulxq 0x40(%rbp), %r8, %r9
mulxq 0x48(%rbp), %rax, %r10
addq %rax, %r9
mulxq 0x50(%rbp), %rax, %r11
adcq %rax, %r10
mulxq 0x58(%rbp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x28(%rsi), %rdx
mulxq 0x40(%rbp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x48(%rbp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x50(%rbp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x58(%rbp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x30(%rsi), %rdx
mulxq 0x40(%rbp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x48(%rbp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x50(%rbp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x58(%rbp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x38(%rsi), %rdx
mulxq 0x40(%rbp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x48(%rbp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x50(%rbp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x58(%rbp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xc0(%rsp)
movq %r13, 0xc8(%rsp)
movq %r14, 0xd0(%rsp)
movq %r15, 0xd8(%rsp)
xorl %ecx, %ecx
movq 0x20(%rbp), %rdx
mulxq 0x40(%rsi), %r8, %r9
mulxq 0x48(%rsi), %rax, %r10
addq %rax, %r9
mulxq 0x50(%rsi), %rax, %r11
adcq %rax, %r10
mulxq 0x58(%rsi), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x28(%rbp), %rdx
mulxq 0x40(%rsi), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x50(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x58(%rsi), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x30(%rbp), %rdx
mulxq 0x40(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x50(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x58(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x38(%rbp), %rdx
mulxq 0x40(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x50(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x58(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x20(%rsp)
movq %r13, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
xorl %ecx, %ecx
movq 0x0(%rbp), %rdx
mulxq (%rsp), %r8, %r9
mulxq 0x8(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0x10(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0x18(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x8(%rbp), %rdx
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x18(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x10(%rbp), %rdx
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x18(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x18(%rbp), %rdx
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x18(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x40(%rsp)
movq %r13, 0x48(%rsp)
movq %r14, 0x50(%rsp)
movq %r15, 0x58(%rsp)
xorl %ecx, %ecx
movq (%rsi), %rdx
mulxq 0xa0(%rsp), %r8, %r9
mulxq 0xa8(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0xb0(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0xb8(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x8(%rsi), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb8(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x10(%rsi), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb8(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x18(%rsi), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb8(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
xorl %ecx, %ecx
movq 0x20(%rsp), %rdx
mulxq (%rsp), %r8, %r9
mulxq 0x8(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0x10(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0x18(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x28(%rsp), %rdx
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x18(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x30(%rsp), %rdx
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x18(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x38(%rsp), %rdx
mulxq (%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x10(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x18(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x20(%rsp)
movq %r13, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
xorl %ecx, %ecx
movq 0xc0(%rsp), %rdx
mulxq 0xa0(%rsp), %r8, %r9
mulxq 0xa8(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0xb0(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0xb8(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0xc8(%rsp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb8(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0xd0(%rsp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb8(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0xd8(%rsp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb8(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xc0(%rsp)
movq %r13, 0xc8(%rsp)
movq %r14, 0xd0(%rsp)
movq %r15, 0xd8(%rsp)
movq 0x40(%rsp), %rax
subq 0x80(%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x88(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x90(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x98(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0xa0(%rsp)
adcq %r10, %rcx
movq %rcx, 0xa8(%rsp)
adcq %r11, %r8
movq %r8, 0xb0(%rsp)
adcq %rdx, %r9
movq %r9, 0xb8(%rsp)
movq 0x20(%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x28(%rsp), %rcx
sbbq 0xc8(%rsp), %rcx
movq 0x30(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x38(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x20(%rsp)
adcq %r10, %rcx
movq %rcx, 0x28(%rsp)
adcq %r11, %r8
movq %r8, 0x30(%rsp)
adcq %rdx, %r9
movq %r9, 0x38(%rsp)
movq 0xa0(%rsp), %rdx
mulxq %rdx, %r8, %r15
mulxq 0xa8(%rsp), %r9, %r10
mulxq 0xb8(%rsp), %r11, %r12
movq 0xb0(%rsp), %rdx
mulxq 0xb8(%rsp), %r13, %r14
xorl %ecx, %ecx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0xb8(%rsp), %rdx
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0xa8(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0xb0(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0xb8(%rsp), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
sbbq %rax, %rax
movabsq $0xffffffff00000000, %rbx
movq %rax, %rcx
andq %rax, %rbx
btr $0x20, %rcx
subq %rax, %r12
sbbq %rbx, %r13
sbbq %rax, %r14
sbbq %rcx, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
movq 0x20(%rsp), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x28(%rsp), %r9, %r10
mulxq 0x38(%rsp), %r11, %r12
movq 0x30(%rsp), %rdx
mulxq 0x38(%rsp), %r13, %r14
xorl %ecx, %ecx
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x28(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x38(%rsp), %rdx
mulxq 0x28(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x28(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x30(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x38(%rsp), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
xorl %ecx, %ecx
movq 0x80(%rsp), %rdx
mulxq 0x60(%rsp), %r8, %r9
mulxq 0x68(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0x70(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0x78(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x88(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x78(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x90(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x78(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x98(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x78(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
xorl %ecx, %ecx
movq 0x40(%rsp), %rdx
mulxq 0x60(%rsp), %r8, %r9
mulxq 0x68(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0x70(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0x78(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x48(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x78(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x50(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x78(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x58(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x78(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x40(%rsp)
movq %r13, 0x48(%rsp)
movq %r14, 0x50(%rsp)
movq %r15, 0x58(%rsp)
movq (%rsp), %rax
subq 0x80(%rsp), %rax
movq 0x8(%rsp), %rcx
sbbq 0x88(%rsp), %rcx
movq 0x10(%rsp), %r8
sbbq 0x90(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0x98(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, (%rsp)
adcq %r10, %rcx
movq %rcx, 0x8(%rsp)
adcq %r11, %r8
movq %r8, 0x10(%rsp)
adcq %rdx, %r9
movq %r9, 0x18(%rsp)
movq 0x40(%rsp), %rax
subq 0x80(%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x88(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x90(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x98(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x60(%rsp)
adcq %r10, %rcx
movq %rcx, 0x68(%rsp)
adcq %r11, %r8
movq %r8, 0x70(%rsp)
adcq %rdx, %r9
movq %r9, 0x78(%rsp)
xorl %ecx, %ecx
movq 0x40(%rsi), %rdx
mulxq 0xa0(%rsp), %r8, %r9
mulxq 0xa8(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0xb0(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0xb8(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x48(%rsi), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb8(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x50(%rsi), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb8(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x58(%rsi), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb8(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
movq (%rsp), %rax
subq 0x40(%rsp), %rax
movq 0x8(%rsp), %rcx
sbbq 0x48(%rsp), %rcx
movq 0x10(%rsp), %r8
sbbq 0x50(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0x58(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, (%rsp)
adcq %r10, %rcx
movq %rcx, 0x8(%rsp)
adcq %r11, %r8
movq %r8, 0x10(%rsp)
adcq %rdx, %r9
movq %r9, 0x18(%rsp)
movq 0x80(%rsp), %rax
subq (%rsp), %rax
movq 0x88(%rsp), %rcx
sbbq 0x8(%rsp), %rcx
movq 0x90(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0x98(%rsp), %r9
sbbq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x80(%rsp)
adcq %r10, %rcx
movq %rcx, 0x88(%rsp)
adcq %r11, %r8
movq %r8, 0x90(%rsp)
adcq %rdx, %r9
movq %r9, 0x98(%rsp)
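// Montgomery product: [0x60..0x78](%rsp) := [0xc0..0xd8](%rsp) * [0x60..0x78](%rsp)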
xorl %ecx, %ecx
movq 0xc0(%rsp), %rdx
mulxq 0x60(%rsp), %r8, %r9
mulxq 0x68(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0x70(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0x78(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0xc8(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x78(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0xd0(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x78(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0xd8(%rsp), %rdx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x70(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x78(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
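// Montgomery product: [0xa0..0xb8](%rsp) := z_2 * [0xa0..0xb8](%rsp),
// where z_2 is the z coordinate of the second input point at 0x40(%rbp).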
xorl %ecx, %ecx
movq 0x40(%rbp), %rdx
mulxq 0xa0(%rsp), %r8, %r9
mulxq 0xa8(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0xb0(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0xb8(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x48(%rbp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb8(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x50(%rbp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb8(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x58(%rbp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb8(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
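// Montgomery product: [0x80..0x98](%rsp) := [0x80..0x98](%rsp) * [0x20..0x38](%rsp)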
xorl %ecx, %ecx
movq 0x80(%rsp), %rdx
mulxq 0x20(%rsp), %r8, %r9
mulxq 0x28(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0x30(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0x38(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x88(%rsp), %rdx
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x28(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x38(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x90(%rsp), %rdx
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x28(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x38(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x98(%rsp), %rdx
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x28(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x30(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x38(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
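// Modular subtraction: [0x80..0x98](%rsp) -= [0x60..0x78](%rsp) (mod p_sm2)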
movq 0x80(%rsp), %rax
subq 0x60(%rsp), %rax
movq 0x88(%rsp), %rcx
sbbq 0x68(%rsp), %rcx
movq 0x90(%rsp), %r8
sbbq 0x70(%rsp), %r8
movq 0x98(%rsp), %r9
sbbq 0x78(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x80(%rsp)
adcq %r10, %rcx
movq %rcx, 0x88(%rsp)
adcq %r11, %r8
movq %r8, 0x90(%rsp)
adcq %rdx, %r9
movq %r9, 0x98(%rsp)
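// Degenerate cases: build all-ones masks recording whether z_1 and z_2
// are nonzero, then select P1 if P2 is the point at infinity, P2 if P1
// is, and otherwise the computed Jacobian sum (x from [0x00](%rsp),
// y from [0x80](%rsp), z from [0xa0](%rsp)).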
movq 0x40(%rsi), %r8
movq 0x48(%rsi), %r9
movq 0x50(%rsi), %r10
movq 0x58(%rsi), %r11
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
movq 0x40(%rbp), %r12
movq 0x48(%rbp), %r13
movq 0x50(%rbp), %r14
movq 0x58(%rbp), %r15
movq %r12, %rbx
movq %r13, %rdx
orq %r14, %rbx
orq %r15, %rdx
orq %rdx, %rbx
negq %rbx
sbbq %rbx, %rbx
cmpq %rax, %rbx
cmovbq %r8, %r12
cmovbq %r9, %r13
cmovbq %r10, %r14
cmovbq %r11, %r15
cmoveq 0xa0(%rsp), %r12
cmoveq 0xa8(%rsp), %r13
cmoveq 0xb0(%rsp), %r14
cmoveq 0xb8(%rsp), %r15
movq (%rsp), %rax
cmovbq (%rsi), %rax
cmova 0x0(%rbp), %rax
movq 0x8(%rsp), %rbx
cmovbq 0x8(%rsi), %rbx
cmova 0x8(%rbp), %rbx
movq 0x10(%rsp), %rcx
cmovbq 0x10(%rsi), %rcx
cmova 0x10(%rbp), %rcx
movq 0x18(%rsp), %rdx
cmovbq 0x18(%rsi), %rdx
cmova 0x18(%rbp), %rdx
movq 0x80(%rsp), %r8
cmovbq 0x20(%rsi), %r8
cmova 0x20(%rbp), %r8
movq 0x88(%rsp), %r9
cmovbq 0x28(%rsi), %r9
cmova 0x28(%rbp), %r9
movq 0x90(%rsp), %r10
cmovbq 0x30(%rsi), %r10
cmova 0x30(%rbp), %r10
movq 0x98(%rsp), %r11
cmovbq 0x38(%rsi), %r11
cmova 0x38(%rbp), %r11
movq %rax, (%rdi)
movq %rbx, 0x8(%rdi)
movq %rcx, 0x10(%rdi)
movq %rdx, 0x18(%rdi)
movq %r8, 0x20(%rdi)
movq %r9, 0x28(%rdi)
movq %r10, 0x30(%rdi)
movq %r11, 0x38(%rdi)
movq %r12, 0x40(%rdi)
movq %r13, 0x48(%rdi)
movq %r14, 0x50(%rdi)
movq %r15, 0x58(%rdi)
CFI_INC_RSP(224)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjdouble)
Lsm2_montjscalarmul_sm2_montjdouble:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(192)
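// Montgomery squaring: [0x00..0x18](%rsp) := z^2, where z is the input
// z coordinate at 0x40(%rsi).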
movq 0x40(%rsi), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x48(%rsi), %r9, %r10
mulxq 0x58(%rsi), %r11, %r12
movq 0x50(%rsi), %rdx
mulxq 0x58(%rsi), %r13, %r14
xorl %ecx, %ecx
mulxq 0x40(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x58(%rsi), %rdx
mulxq 0x48(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x48(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x50(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x58(%rsi), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
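// Four rounds of Montgomery reduction. Since p_sm2 = 2^256 - 2^224 -
// 2^96 + 2^64 - 1 satisfies p_sm2 == -1 (mod 2^64), the Montgomery
// multiplier for each window w is w itself; adding w * p_sm2 clears the
// bottom limb, implemented as adding w * 2^256 while subtracting
// w * (2^224 + 2^96 - 2^64).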
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
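// Montgomery squaring: [0x20..0x38](%rsp) := y^2, where y is the input
// y coordinate at 0x20(%rsi).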
movq 0x20(%rsi), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x28(%rsi), %r9, %r10
mulxq 0x38(%rsi), %r11, %r12
movq 0x30(%rsi), %rdx
mulxq 0x38(%rsi), %r13, %r14
xorl %ecx, %ecx
mulxq 0x20(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x28(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x38(%rsi), %rdx
mulxq 0x28(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x28(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x30(%rsi), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x38(%rsi), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x20(%rsp)
movq %r13, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
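// Form x - z^2 (into [0x60..0x78](%rsp)) and x + z^2 (into
// [0x40..0x58](%rsp)) modulo p_sm2.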
movq (%rsi), %rax
subq (%rsp), %rax
movq 0x8(%rsi), %rcx
sbbq 0x8(%rsp), %rcx
movq 0x10(%rsi), %r8
sbbq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
sbbq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x60(%rsp)
adcq %r10, %rcx
movq %rcx, 0x68(%rsp)
adcq %r11, %r8
movq %r8, 0x70(%rsp)
adcq %rdx, %r9
movq %r9, 0x78(%rsp)
movq (%rsi), %rax
addq (%rsp), %rax
movq 0x8(%rsi), %rcx
adcq 0x8(%rsp), %rcx
movq 0x10(%rsi), %r8
adcq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
adcq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
subq %r11, %rax
movq %rax, 0x40(%rsp)
sbbq %r10, %rcx
movq %rcx, 0x48(%rsp)
sbbq %r11, %r8
movq %r8, 0x50(%rsp)
sbbq %rdx, %r9
movq %r9, 0x58(%rsp)
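// Montgomery product: [0x60..0x78](%rsp) := (x - z^2) * (x + z^2)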
xorl %ecx, %ecx
movq 0x60(%rsp), %rdx
mulxq 0x40(%rsp), %r8, %r9
mulxq 0x48(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0x50(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0x58(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x68(%rsp), %rdx
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x58(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x70(%rsp), %rdx
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x58(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x78(%rsp), %rdx
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x50(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x58(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
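// Modular addition: [0x40..0x58](%rsp) := y + z (mod p_sm2), subtracting
// the prime and adding it back on borrow for a fully reduced result.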
xorq %r11, %r11
movq 0x20(%rsi), %rax
addq 0x40(%rsi), %rax
movq 0x28(%rsi), %rcx
adcq 0x48(%rsi), %rcx
movq 0x30(%rsi), %r8
adcq 0x50(%rsi), %r8
movq 0x38(%rsi), %r9
adcq 0x58(%rsi), %r9
adcq %r11, %r11
subq $0xffffffffffffffff, %rax
movabsq $0xffffffff00000000, %r10
sbbq %r10, %rcx
sbbq $0xffffffffffffffff, %r8
movabsq $0xfffffffeffffffff, %rdx
sbbq %rdx, %r9
sbbq $0x0, %r11
andq %r11, %r10
andq %r11, %rdx
addq %r11, %rax
movq %rax, 0x40(%rsp)
adcq %r10, %rcx
movq %rcx, 0x48(%rsp)
adcq %r11, %r8
movq %r8, 0x50(%rsp)
adcq %rdx, %r9
movq %r9, 0x58(%rsp)
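// Montgomery product: [0x80..0x98](%rsp) := y^2 * x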
xorl %ecx, %ecx
movq 0x20(%rsp), %rdx
mulxq (%rsi), %r8, %r9
mulxq 0x8(%rsi), %rax, %r10
addq %rax, %r9
mulxq 0x10(%rsi), %rax, %r11
adcq %rax, %r10
mulxq 0x18(%rsi), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x28(%rsp), %rdx
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x18(%rsi), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x30(%rsp), %rdx
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x18(%rsi), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x38(%rsp), %rdx
mulxq (%rsi), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x8(%rsi), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x10(%rsi), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x18(%rsi), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
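// Montgomery squaring: [0xa0..0xb8](%rsp) := [0x60..0x78](%rsp)^2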
movq 0x60(%rsp), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x68(%rsp), %r9, %r10
mulxq 0x78(%rsp), %r11, %r12
movq 0x70(%rsp), %rdx
mulxq 0x78(%rsp), %r13, %r14
xorl %ecx, %ecx
mulxq 0x60(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x78(%rsp), %rdx
mulxq 0x68(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x68(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x70(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x78(%rsp), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
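// Montgomery squaring: [0x40..0x58](%rsp) := (y + z)^2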
movq 0x40(%rsp), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x48(%rsp), %r9, %r10
mulxq 0x58(%rsp), %r11, %r12
movq 0x50(%rsp), %rdx
mulxq 0x58(%rsp), %r13, %r14
xorl %ecx, %ecx
mulxq 0x40(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x58(%rsp), %rdx
mulxq 0x48(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x48(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x50(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x58(%rsp), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x40(%rsp)
movq %r13, 0x48(%rsp)
movq %r14, 0x50(%rsp)
movq %r15, 0x58(%rsp)
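// [0xa0..0xb8](%rsp) := 12 * [0x80..0x98](%rsp) - 9 * [0xa0..0xb8](%rsp)
// (mod p_sm2), computed as 9 * (p_sm2 - [0xa0..0xb8]) + 12 * [0x80..0x98]
// followed by a quotient-estimate reduction.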
movq $0xffffffffffffffff, %r8
movq %r8, %r10
subq 0xa0(%rsp), %r8
movabsq $0xffffffff00000000, %r9
sbbq 0xa8(%rsp), %r9
sbbq 0xb0(%rsp), %r10
movabsq $0xfffffffeffffffff, %r11
sbbq 0xb8(%rsp), %r11
xorl %r12d, %r12d
movq $0x9, %rdx
mulxq %r8, %r8, %rax
mulxq %r9, %r9, %rcx
addq %rax, %r9
mulxq %r10, %r10, %rax
adcq %rcx, %r10
mulxq %r11, %r11, %rcx
adcq %rax, %r11
adcq %rcx, %r12
movq $0xc, %rdx
xorl %eax, %eax
mulxq 0x80(%rsp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
mulxq 0x88(%rsp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
mulxq 0x90(%rsp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
mulxq 0x98(%rsp), %rax, %rdx
adcxq %rax, %r11
adoxq %r12, %rdx
adcq $0x1, %rdx
movq %rdx, %rax
shlq $0x20, %rax
movq %rax, %rcx
subq %rdx, %rax
addq %rdx, %r8
adcq %rax, %r9
adcq $0x0, %r10
adcq %rcx, %r11
sbbq %rdx, %rdx
notq %rdx
movabsq $0xffffffff00000000, %rax
andq %rdx, %rax
movq %rdx, %rcx
btr $0x20, %rcx
addq %rdx, %r8
movq %r8, 0xa0(%rsp)
adcq %rax, %r9
movq %r9, 0xa8(%rsp)
adcq %rdx, %r10
movq %r10, 0xb0(%rsp)
adcq %rcx, %r11
movq %r11, 0xb8(%rsp)
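// Modular subtraction: [0x40..0x58](%rsp) := (y + z)^2 - z^2 (mod p_sm2)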
movq 0x40(%rsp), %rax
subq (%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x8(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x40(%rsp)
adcq %r10, %rcx
movq %rcx, 0x48(%rsp)
adcq %r11, %r8
movq %r8, 0x50(%rsp)
adcq %rdx, %r9
movq %r9, 0x58(%rsp)
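// Montgomery squaring: [0x00..0x18](%rsp) := (y^2)^2 = y^4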
movq 0x20(%rsp), %rdx
mulxq %rdx, %r8, %r15
mulxq 0x28(%rsp), %r9, %r10
mulxq 0x38(%rsp), %r11, %r12
movq 0x30(%rsp), %rdx
mulxq 0x38(%rsp), %r13, %r14
xorl %ecx, %ecx
mulxq 0x20(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x28(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq 0x38(%rsp), %rdx
mulxq 0x28(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
adcxq %rcx, %r13
adoxq %rcx, %r14
adcq %rcx, %r14
xorl %ecx, %ecx
adcxq %r9, %r9
adoxq %r15, %r9
movq 0x28(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 0x30(%rsp), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 0x38(%rsp), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq %rcx, %r15
adoxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
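// Montgomery product: [0x60..0x78](%rsp) := [0x60..0x78](%rsp) * [0xa0..0xb8](%rsp)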
xorl %ecx, %ecx
movq 0x60(%rsp), %rdx
mulxq 0xa0(%rsp), %r8, %r9
mulxq 0xa8(%rsp), %rax, %r10
addq %rax, %r9
mulxq 0xb0(%rsp), %rax, %r11
adcq %rax, %r10
mulxq 0xb8(%rsp), %rax, %r12
adcq %rax, %r11
adcq %rcx, %r12
xorl %ecx, %ecx
movq 0x68(%rsp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb8(%rsp), %rax, %r13
adcxq %rax, %r12
adoxq %rcx, %r13
adcxq %rcx, %r13
xorl %ecx, %ecx
movq 0x70(%rsp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb8(%rsp), %rax, %r14
adcxq %rax, %r13
adoxq %rcx, %r14
adcxq %rcx, %r14
xorl %ecx, %ecx
movq 0x78(%rsp), %rdx
mulxq 0xa0(%rsp), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0xa8(%rsp), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0xb0(%rsp), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0xb8(%rsp), %rax, %r15
adcxq %rax, %r14
adoxq %rcx, %r15
adcxq %rcx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
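// Output z' := (y + z)^2 - z^2 - y^2 = 2 * y * z (mod p_sm2),
// stored at 0x40..0x58(%rdi).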
movq 0x40(%rsp), %rax
subq 0x20(%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x28(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x30(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x38(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x40(%rdi)
adcq %r10, %rcx
movq %rcx, 0x48(%rdi)
adcq %r11, %r8
movq %r8, 0x50(%rdi)
adcq %rdx, %r9
movq %r9, 0x58(%rdi)
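// Output x' := 4 * [0x80..0x98](%rsp) - [0xa0..0xb8](%rsp) (mod p_sm2):
// shift left by 2 via shld, subtract, then reduce with a quotient
// estimate; stored at 0x00..0x18(%rdi).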
movq 0x98(%rsp), %r11
movq %r11, %rdx
movq 0x90(%rsp), %r10
shldq $0x2, %r10, %r11
movq 0x88(%rsp), %r9
shldq $0x2, %r9, %r10
movq 0x80(%rsp), %r8
shldq $0x2, %r8, %r9
shlq $0x2, %r8
shrq $0x3e, %rdx
addq $0x1, %rdx
subq 0xa0(%rsp), %r8
sbbq 0xa8(%rsp), %r9
sbbq 0xb0(%rsp), %r10
sbbq 0xb8(%rsp), %r11
sbbq $0x0, %rdx
movq %rdx, %rax
shlq $0x20, %rax
movq %rax, %rcx
subq %rdx, %rax
addq %rdx, %r8
adcq %rax, %r9
adcq $0x0, %r10
adcq %rcx, %r11
sbbq %rdx, %rdx
notq %rdx
movabsq $0xffffffff00000000, %rax
andq %rdx, %rax
movq %rdx, %rcx
btr $0x20, %rcx
addq %rdx, %r8
movq %r8, (%rdi)
adcq %rax, %r9
movq %r9, 0x8(%rdi)
adcq %rdx, %r10
movq %r10, 0x10(%rdi)
adcq %rcx, %r11
movq %r11, 0x18(%rdi)
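// Output y' := 3 * [0x60..0x78](%rsp) - 8 * y^4 (mod p_sm2), computed as
// 8 * (p_sm2 - y^4) + 3 * [0x60..0x78] and reduced the same way;
// stored at 0x20..0x38(%rdi).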
movq $0xffffffffffffffff, %r8
movq %r8, %r10
subq (%rsp), %r8
movabsq $0xffffffff00000000, %r9
sbbq 0x8(%rsp), %r9
sbbq 0x10(%rsp), %r10
movabsq $0xfffffffeffffffff, %r11
sbbq 0x18(%rsp), %r11
movq %r11, %r12
shldq $0x3, %r10, %r11
shldq $0x3, %r9, %r10
shldq $0x3, %r8, %r9
shlq $0x3, %r8
shrq $0x3d, %r12
movq $0x3, %rdx
xorl %eax, %eax
mulxq 0x60(%rsp), %rax, %rcx
adcxq %rax, %r8
adoxq %rcx, %r9
mulxq 0x68(%rsp), %rax, %rcx
adcxq %rax, %r9
adoxq %rcx, %r10
mulxq 0x70(%rsp), %rax, %rcx
adcxq %rax, %r10
adoxq %rcx, %r11
mulxq 0x78(%rsp), %rax, %rdx
adcxq %rax, %r11
adoxq %r12, %rdx
adcq $0x1, %rdx
movq %rdx, %rax
shlq $0x20, %rax
movq %rax, %rcx
subq %rdx, %rax
addq %rdx, %r8
adcq %rax, %r9
adcq $0x0, %r10
adcq %rcx, %r11
sbbq %rdx, %rdx
notq %rdx
movabsq $0xffffffff00000000, %rax
andq %rdx, %rax
movq %rdx, %rcx
btr $0x20, %rcx
addq %rdx, %r8
movq %r8, 0x20(%rdi)
adcq %rax, %r9
movq %r9, 0x28(%rdi)
adcq %rdx, %r10
movq %r10, 0x30(%rdi)
adcq %rcx, %r11
movq %r11, 0x38(%rdi)
CFI_INC_RSP(192)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
wlsfx/bnbb | 127,729 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjscalarmul_alt.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for GM/T 0003-2012 curve SM2
// Input scalar[4], point[12]; output res[12]
//
// extern void sm2_montjscalarmul_alt
// (uint64_t res[static 12],
// const uint64_t scalar[static 4],
// const uint64_t point[static 12]);
//
// This function is a variant of its affine point version sm2_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^256 * x) mod p_sm2 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the GM/T 0003-2012
// elliptic curve SM2, returns a representation of n * P. If the result
// is the point at infinity (either because the input point was or because
// the scalar was a multiple of the group order n_sm2) then the output is
// guaranteed to represent the point at infinity, i.e. to have its z
// coordinate zero.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point
// Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjscalarmul_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjscalarmul_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjscalarmul_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 32
// Intermediate variables on the stack. Uppercase syntactic variants
// make x86_att version simpler to generate.
#define SCALARB (0*NUMSIZE)
#define scalarb (0*NUMSIZE)(%rsp)
#define ACC (1*NUMSIZE)
#define acc (1*NUMSIZE)(%rsp)
#define TABENT (4*NUMSIZE)
#define tabent (4*NUMSIZE)(%rsp)
#define TAB (7*NUMSIZE)
#define tab (7*NUMSIZE)(%rsp)
#define res (31*NUMSIZE)(%rsp)
#define NSPACE 32*NUMSIZE
// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.
#define selectblock(I) \
cmpq $I, %rdi ; \
cmovzq TAB+96*(I-1)(%rsp), %rax ; \
cmovzq TAB+96*(I-1)+8(%rsp), %rbx ; \
cmovzq TAB+96*(I-1)+16(%rsp), %rcx ; \
cmovzq TAB+96*(I-1)+24(%rsp), %rdx ; \
cmovzq TAB+96*(I-1)+32(%rsp), %r8 ; \
cmovzq TAB+96*(I-1)+40(%rsp), %r9 ; \
cmovzq TAB+96*(I-1)+48(%rsp), %r10 ; \
cmovzq TAB+96*(I-1)+56(%rsp), %r11 ; \
cmovzq TAB+96*(I-1)+64(%rsp), %r12 ; \
cmovzq TAB+96*(I-1)+72(%rsp), %r13 ; \
cmovzq TAB+96*(I-1)+80(%rsp), %r14 ; \
cmovzq TAB+96*(I-1)+88(%rsp), %r15
S2N_BN_SYMBOL(sm2_montjscalarmul_alt):
CFI_START
_CET_ENDBR
// The Windows version literally calls the standard ABI version.
// This simplifies the proofs since subroutine offsets are fixed.
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
CFI_CALL(Lsm2_montjscalarmul_alt_standard)
CFI_POP(%rsi)
CFI_POP(%rdi)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjscalarmul_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_alt_standard)
Lsm2_montjscalarmul_alt_standard:
CFI_START
#endif
// Real start of the standard ABI code.
CFI_PUSH(%r15)
CFI_PUSH(%r14)
CFI_PUSH(%r13)
CFI_PUSH(%r12)
CFI_PUSH(%rbp)
CFI_PUSH(%rbx)
CFI_DEC_RSP(NSPACE)
// Preserve the "res" and "point" input arguments. We load and process the
// scalar immediately so we don't bother preserving that input argument.
// Also, "point" is only needed early on and so its register gets re-used.
movq %rdx, %rbx
movq %rdi, res
// Load the digits of group order n_sm2 = [%r15;%r14;%r13;%r12]
movq $0x53bbf40939d54123, %r12
movq $0x7203df6b21c6052b, %r13
movq $0xffffffffffffffff, %r14
movq $0xfffffffeffffffff, %r15
// First, reduce the input scalar mod n_sm2, i.e. conditionally subtract n_sm2
movq (%rsi), %r8
subq %r12, %r8
movq 8(%rsi), %r9
sbbq %r13, %r9
movq 16(%rsi), %r10
sbbq %r14, %r10
movq 24(%rsi), %r11
sbbq %r15, %r11
cmovcq (%rsi), %r8
cmovcq 8(%rsi), %r9
cmovcq 16(%rsi), %r10
cmovcq 24(%rsi), %r11
// Now if the top bit of the reduced scalar is set, negate it mod n_sm2,
// i.e. do n |-> n_sm2 - n. Remember the sign in %rbp so we can
// correspondingly negate the point below.
subq %r8, %r12
sbbq %r9, %r13
sbbq %r10, %r14
sbbq %r11, %r15
movq %r11, %rbp
shrq $63, %rbp
cmovnzq %r12, %r8
cmovnzq %r13, %r9
cmovnzq %r14, %r10
cmovnzq %r15, %r11
// In either case then add the recoding constant 0x08888...888 to allow
// signed digits.
movq $0x8888888888888888, %rax
addq %rax, %r8
adcq %rax, %r9
adcq %rax, %r10
adcq %rax, %r11
btc $63, %r11
movq %r8, SCALARB(%rsp)
movq %r9, SCALARB+8(%rsp)
movq %r10, SCALARB+16(%rsp)
movq %r11, SCALARB+24(%rsp)
// Set the tab[0] table entry to the input point = 1 * P, except
// that we negate it if the top bit of the scalar was set. This
// negation takes care over the y = 0 case to maintain all the
// coordinates < p_sm2 throughout, even though triples (x,y,z)
// with y = 0 can only represent a point on the curve when z = 0
// and it represents the point at infinity regardless of x and y.
movq (%rbx), %rax
movq %rax, TAB(%rsp)
movq 8(%rbx), %rax
movq %rax, TAB+8(%rsp)
movq 16(%rbx), %rax
movq %rax, TAB+16(%rsp)
movq 24(%rbx), %rax
movq %rax, TAB+24(%rsp)
movq 32(%rbx), %r12
movq %r12, %rax
movq 40(%rbx), %r13
orq %r13, %rax
movq 48(%rbx), %r14
movq %r14, %rcx
movq 56(%rbx), %r15
orq %r15, %rcx
orq %rcx, %rax
cmovzq %rax, %rbp
xorl %r11d, %r11d
movl $0x00000000ffffffff, %r9d
notq %r11
movq %r11, %r8
movq %r11, %r10
xorq %r8, %r9
btr $32, %r11
subq %r12, %r8
sbbq %r13, %r9
sbbq %r14, %r10
sbbq %r15, %r11
testq %rbp, %rbp
cmovzq %r12, %r8
cmovzq %r13, %r9
cmovzq %r14, %r10
cmovzq %r15, %r11
movq %r8, TAB+32(%rsp)
movq %r9, TAB+40(%rsp)
movq %r10, TAB+48(%rsp)
movq %r11, TAB+56(%rsp)
movq 64(%rbx), %rax
movq %rax, TAB+64(%rsp)
movq 72(%rbx), %rax
movq %rax, TAB+72(%rsp)
movq 80(%rbx), %rax
movq %rax, TAB+80(%rsp)
movq 88(%rbx), %rax
movq %rax, TAB+88(%rsp)
// Compute and record tab[1] = 2 * P, ..., tab[7] = 8 * P
leaq TAB+96*1(%rsp), %rdi
leaq TAB(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
leaq TAB+96*2(%rsp), %rdi
leaq TAB+96*1(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjadd)
leaq TAB+96*3(%rsp), %rdi
leaq TAB+96*1(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
leaq TAB+96*4(%rsp), %rdi
leaq TAB+96*3(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjadd)
leaq TAB+96*5(%rsp), %rdi
leaq TAB+96*2(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
leaq TAB+96*6(%rsp), %rdi
leaq TAB+96*5(%rsp), %rsi
leaq TAB(%rsp), %rdx
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjadd)
leaq TAB+96*7(%rsp), %rdi
leaq TAB+96*3(%rsp), %rsi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
// Set up accumulator as table entry for top 4 bits (constant-time indexing)
movq SCALARB+24(%rsp), %rdi
shrq $60, %rdi
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
movq %rax, ACC(%rsp)
movq %rbx, ACC+8(%rsp)
movq %rcx, ACC+16(%rsp)
movq %rdx, ACC+24(%rsp)
movq %r8, ACC+32(%rsp)
movq %r9, ACC+40(%rsp)
movq %r10, ACC+48(%rsp)
movq %r11, ACC+56(%rsp)
movq %r12, ACC+64(%rsp)
movq %r13, ACC+72(%rsp)
movq %r14, ACC+80(%rsp)
movq %r15, ACC+88(%rsp)
// Main loop over size-4 bitfield
movl $252, %ebp
Lsm2_montjscalarmul_alt_mainloop:
subq $4, %rbp
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjdouble)
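// Extract the next signed 4-bit window of the recoded scalar:
// digit = ((scalar >> %rbp) & 15) - 8, split below into its sign and
// absolute value for the constant-time table lookup.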
movq %rbp, %rax
shrq $6, %rax
movq (%rsp,%rax,8), %rdi
movq %rbp, %rcx
shrq %cl, %rdi
andq $15, %rdi
subq $8, %rdi
sbbq %rsi, %rsi // %rsi = sign of digit (-1 = negative)
xorq %rsi, %rdi
subq %rsi, %rdi // %rdi = absolute value of digit
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
xorl %r9d, %r9d
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
xorl %r15d, %r15d
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
// Store it to "tabent" with the y coordinate optionally negated
// Again, do it carefully to give coordinates < p_sm2 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
movq %rax, TABENT(%rsp)
movq %rbx, TABENT+8(%rsp)
movq %rcx, TABENT+16(%rsp)
movq %rdx, TABENT+24(%rsp)
movq %r12, TABENT+64(%rsp)
movq %r13, TABENT+72(%rsp)
movq %r14, TABENT+80(%rsp)
movq %r15, TABENT+88(%rsp)
xorl %r15d, %r15d
movq %r8, %rax
movl $0x00000000ffffffff, %r13d
orq %r9, %rax
notq %r15
movq %r10, %rcx
movq %r15, %r12
orq %r11, %rcx
movq %r15, %r14
xorq %r12, %r13
btr $32, %r15
orq %rcx, %rax
cmovzq %rax, %rsi
subq %r8, %r12
sbbq %r9, %r13
sbbq %r10, %r14
sbbq %r11, %r15
testq %rsi, %rsi
cmovnzq %r12, %r8
cmovnzq %r13, %r9
cmovnzq %r14, %r10
cmovnzq %r15, %r11
movq %r8, TABENT+32(%rsp)
movq %r9, TABENT+40(%rsp)
movq %r10, TABENT+48(%rsp)
movq %r11, TABENT+56(%rsp)
leaq TABENT(%rsp), %rdx
leaq ACC(%rsp), %rsi
leaq ACC(%rsp), %rdi
CFI_CALL(Lsm2_montjscalarmul_alt_sm2_montjadd)
testq %rbp, %rbp
jne Lsm2_montjscalarmul_alt_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
movq res, %rdi
movq ACC(%rsp), %rax
movq %rax, (%rdi)
movq ACC+8(%rsp), %rax
movq %rax, 8(%rdi)
movq ACC+16(%rsp), %rax
movq %rax, 16(%rdi)
movq ACC+24(%rsp), %rax
movq %rax, 24(%rdi)
movq ACC+32(%rsp), %rax
movq %rax, 32(%rdi)
movq ACC+40(%rsp), %rax
movq %rax, 40(%rdi)
movq ACC+48(%rsp), %rax
movq %rax, 48(%rdi)
movq ACC+56(%rsp), %rax
movq %rax, 56(%rdi)
movq ACC+64(%rsp), %rax
movq %rax, 64(%rdi)
movq ACC+72(%rsp), %rax
movq %rax, 72(%rdi)
movq ACC+80(%rsp), %rax
movq %rax, 80(%rdi)
movq ACC+88(%rsp), %rax
movq %rax, 88(%rdi)
// Restore stack and registers and return
CFI_INC_RSP(NSPACE)
CFI_POP(%rbx)
CFI_POP(%rbp)
CFI_POP(%r12)
CFI_POP(%r13)
CFI_POP(%r14)
CFI_POP(%r15)
CFI_RET
#if WINDOWS_ABI
S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_alt_standard)
#else
S2N_BN_SIZE_DIRECTIVE(sm2_montjscalarmul_alt)
#endif
// Local copies of subroutines, complete clones at the moment
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjadd)
Lsm2_montjscalarmul_alt_sm2_montjadd:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(224)
movq %rdx, %rbp
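// Montgomery squaring via plain mulq (the _alt form avoids the
// mulx/adcx/adox instructions): [0x00..0x18](%rsp) := z_1^2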
movq 0x40(%rsi), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x48(%rsi), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x58(%rsi), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x50(%rsi), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x40(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x48(%rsi), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x58(%rsi), %rbx
movq 0x48(%rsi), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x48(%rsi), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x50(%rsi), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x58(%rsi), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
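// Montgomery squaring: [0xa0..0xb8](%rsp) := z_2^2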
movq 0x40(%rbp), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x48(%rbp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x58(%rbp), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x50(%rbp), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x40(%rbp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x48(%rbp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x58(%rbp), %rbx
movq 0x48(%rbp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x48(%rbp), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x50(%rbp), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x58(%rbp), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
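// Montgomery product: [0xc0..0xd8](%rsp) := z_2 * y_1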
movq 0x40(%rbp), %rax
mulq 0x20(%rsi)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0x40(%rbp), %rax
mulq 0x28(%rsi)
addq %rax, %r9
adcq %rdx, %r10
movq 0x48(%rbp), %rax
mulq 0x20(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0x40(%rbp), %rax
mulq 0x30(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x48(%rbp), %rax
mulq 0x28(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x50(%rbp), %rax
mulq 0x20(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0x40(%rbp), %rax
mulq 0x38(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x48(%rbp), %rax
mulq 0x30(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x50(%rbp), %rax
mulq 0x28(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x58(%rbp), %rax
mulq 0x20(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x48(%rbp), %rax
mulq 0x38(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x50(%rbp), %rax
mulq 0x30(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x58(%rbp), %rax
mulq 0x28(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x50(%rbp), %rax
mulq 0x38(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x58(%rbp), %rax
mulq 0x30(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x58(%rbp), %rax
mulq 0x38(%rsi)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xc0(%rsp)
movq %r13, 0xc8(%rsp)
movq %r14, 0xd0(%rsp)
movq %r15, 0xd8(%rsp)
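// Montgomery product: [0x20..0x38](%rsp) := z_1 * y_2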
movq 0x40(%rsi), %rax
mulq 0x20(%rbp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0x40(%rsi), %rax
mulq 0x28(%rbp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x48(%rsi), %rax
mulq 0x20(%rbp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0x40(%rsi), %rax
mulq 0x30(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x48(%rsi), %rax
mulq 0x28(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x50(%rsi), %rax
mulq 0x20(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0x40(%rsi), %rax
mulq 0x38(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x48(%rsi), %rax
mulq 0x30(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x50(%rsi), %rax
mulq 0x28(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x58(%rsi), %rax
mulq 0x20(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x48(%rsi), %rax
mulq 0x38(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x50(%rsi), %rax
mulq 0x30(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x58(%rsi), %rax
mulq 0x28(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x50(%rsi), %rax
mulq 0x38(%rbp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x58(%rsi), %rax
mulq 0x30(%rbp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x58(%rsi), %rax
mulq 0x38(%rbp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x20(%rsp)
movq %r13, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
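// Montgomery product: [0x40..0x58](%rsp) := z_1^2 * x_2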
movq (%rsp), %rax
mulq 0x0(%rbp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq (%rsp), %rax
mulq 0x8(%rbp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x8(%rsp), %rax
mulq 0x0(%rbp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq (%rsp), %rax
mulq 0x10(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x8(%rsp), %rax
mulq 0x8(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x10(%rsp), %rax
mulq 0x0(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq (%rsp), %rax
mulq 0x18(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x8(%rsp), %rax
mulq 0x10(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x10(%rsp), %rax
mulq 0x8(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x18(%rsp), %rax
mulq 0x0(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x8(%rsp), %rax
mulq 0x18(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x10(%rsp), %rax
mulq 0x10(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x18(%rsp), %rax
mulq 0x8(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x10(%rsp), %rax
mulq 0x18(%rbp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x18(%rsp), %rax
mulq 0x10(%rbp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x18(%rsp), %rax
mulq 0x18(%rbp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x40(%rsp)
movq %r13, 0x48(%rsp)
movq %r14, 0x50(%rsp)
movq %r15, 0x58(%rsp)
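// Montgomery product: [0x80..0x98](%rsp) := z_2^2 * x_1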
movq 0xa0(%rsp), %rax
mulq (%rsi)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0xa0(%rsp), %rax
mulq 0x8(%rsi)
addq %rax, %r9
adcq %rdx, %r10
movq 0xa8(%rsp), %rax
mulq (%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0xa0(%rsp), %rax
mulq 0x10(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0xa8(%rsp), %rax
mulq 0x8(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0xb0(%rsp), %rax
mulq (%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0xa0(%rsp), %rax
mulq 0x18(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0xa8(%rsp), %rax
mulq 0x10(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb0(%rsp), %rax
mulq 0x8(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb8(%rsp), %rax
mulq (%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0xa8(%rsp), %rax
mulq 0x18(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0xb0(%rsp), %rax
mulq 0x10(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0xb8(%rsp), %rax
mulq 0x8(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0xb0(%rsp), %rax
mulq 0x18(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0xb8(%rsp), %rax
mulq 0x10(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0xb8(%rsp), %rax
mulq 0x18(%rsi)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq (%rsp), %rax
mulq 0x20(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq (%rsp), %rax
mulq 0x28(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x8(%rsp), %rax
mulq 0x20(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq (%rsp), %rax
mulq 0x30(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x8(%rsp), %rax
mulq 0x28(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x10(%rsp), %rax
mulq 0x20(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq (%rsp), %rax
mulq 0x38(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x8(%rsp), %rax
mulq 0x30(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x10(%rsp), %rax
mulq 0x28(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x18(%rsp), %rax
mulq 0x20(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x8(%rsp), %rax
mulq 0x38(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x10(%rsp), %rax
mulq 0x30(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x18(%rsp), %rax
mulq 0x28(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x10(%rsp), %rax
mulq 0x38(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x18(%rsp), %rax
mulq 0x30(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x18(%rsp), %rax
mulq 0x38(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x20(%rsp)
movq %r13, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
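// montmul: 0xa0(%rsp) * 0xc0(%rsp) -> 0xc0(%rsp)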
movq 0xa0(%rsp), %rax
mulq 0xc0(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0xa0(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0xa8(%rsp), %rax
mulq 0xc0(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0xa0(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0xa8(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0xb0(%rsp), %rax
mulq 0xc0(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0xa0(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0xa8(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb0(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb8(%rsp), %rax
mulq 0xc0(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0xa8(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0xb0(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0xb8(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0xb0(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0xb8(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0xb8(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xc0(%rsp)
movq %r13, 0xc8(%rsp)
movq %r14, 0xd0(%rsp)
movq %r15, 0xd8(%rsp)
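// Two modular subtractions mod p_sm2: 0x40-0x80 -> 0xa0(%rsp), then
// 0x20-0xc0 -> 0x20(%rsp). The borrow mask in %r11 re-adds p_sm2, with
// btr $0x20 producing the 0xfffffffeffffffff top digit.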
movq 0x40(%rsp), %rax
subq 0x80(%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x88(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x90(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x98(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0xa0(%rsp)
adcq %r10, %rcx
movq %rcx, 0xa8(%rsp)
adcq %r11, %r8
movq %r8, 0xb0(%rsp)
adcq %rdx, %r9
movq %r9, 0xb8(%rsp)
movq 0x20(%rsp), %rax
subq 0xc0(%rsp), %rax
movq 0x28(%rsp), %rcx
sbbq 0xc8(%rsp), %rcx
movq 0x30(%rsp), %r8
sbbq 0xd0(%rsp), %r8
movq 0x38(%rsp), %r9
sbbq 0xd8(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x20(%rsp)
adcq %r10, %rcx
movq %rcx, 0x28(%rsp)
adcq %r11, %r8
movq %r8, 0x30(%rsp)
adcq %rdx, %r9
movq %r9, 0x38(%rsp)
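// montsqr: 0xa0(%rsp)^2 -> 0x60(%rsp)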
movq 0xa0(%rsp), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0xa8(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0xb8(%rsp), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0xb0(%rsp), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0xa0(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0xa8(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0xb8(%rsp), %rbx
movq 0xa8(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0xa8(%rsp), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0xb0(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0xb8(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
movq 0x20(%rsp), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x28(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x38(%rsp), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x30(%rsp), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x20(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x28(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x38(%rsp), %rbx
movq 0x28(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x28(%rsp), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x30(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x38(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
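// montmul: 0x60(%rsp) * 0x80(%rsp) -> 0x80(%rsp)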
movq 0x60(%rsp), %rax
mulq 0x80(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0x60(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x68(%rsp), %rax
mulq 0x80(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0x60(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x68(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x70(%rsp), %rax
mulq 0x80(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0x60(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x68(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x70(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x78(%rsp), %rax
mulq 0x80(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x68(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x70(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x78(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x70(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x78(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x78(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq 0x60(%rsp), %rax
mulq 0x40(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0x60(%rsp), %rax
mulq 0x48(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x68(%rsp), %rax
mulq 0x40(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0x60(%rsp), %rax
mulq 0x50(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x68(%rsp), %rax
mulq 0x48(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x70(%rsp), %rax
mulq 0x40(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0x60(%rsp), %rax
mulq 0x58(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x68(%rsp), %rax
mulq 0x50(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x70(%rsp), %rax
mulq 0x48(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x78(%rsp), %rax
mulq 0x40(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x68(%rsp), %rax
mulq 0x58(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x70(%rsp), %rax
mulq 0x50(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x78(%rsp), %rax
mulq 0x48(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x70(%rsp), %rax
mulq 0x58(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x78(%rsp), %rax
mulq 0x50(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x78(%rsp), %rax
mulq 0x58(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x40(%rsp)
movq %r13, 0x48(%rsp)
movq %r14, 0x50(%rsp)
movq %r15, 0x58(%rsp)
movq (%rsp), %rax
subq 0x80(%rsp), %rax
movq 0x8(%rsp), %rcx
sbbq 0x88(%rsp), %rcx
movq 0x10(%rsp), %r8
sbbq 0x90(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0x98(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, (%rsp)
adcq %r10, %rcx
movq %rcx, 0x8(%rsp)
adcq %r11, %r8
movq %r8, 0x10(%rsp)
adcq %rdx, %r9
movq %r9, 0x18(%rsp)
movq 0x40(%rsp), %rax
subq 0x80(%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x88(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x90(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x98(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x60(%rsp)
adcq %r10, %rcx
movq %rcx, 0x68(%rsp)
adcq %r11, %r8
movq %r8, 0x70(%rsp)
adcq %rdx, %r9
movq %r9, 0x78(%rsp)
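// montmul: 0xa0(%rsp) * z_1 (at 0x40(%rsi)) -> 0xa0(%rsp)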
movq 0xa0(%rsp), %rax
mulq 0x40(%rsi)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0xa0(%rsp), %rax
mulq 0x48(%rsi)
addq %rax, %r9
adcq %rdx, %r10
movq 0xa8(%rsp), %rax
mulq 0x40(%rsi)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0xa0(%rsp), %rax
mulq 0x50(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0xa8(%rsp), %rax
mulq 0x48(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0xb0(%rsp), %rax
mulq 0x40(%rsi)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0xa0(%rsp), %rax
mulq 0x58(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0xa8(%rsp), %rax
mulq 0x50(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb0(%rsp), %rax
mulq 0x48(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb8(%rsp), %rax
mulq 0x40(%rsi)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0xa8(%rsp), %rax
mulq 0x58(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0xb0(%rsp), %rax
mulq 0x50(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0xb8(%rsp), %rax
mulq 0x48(%rsi)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0xb0(%rsp), %rax
mulq 0x58(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0xb8(%rsp), %rax
mulq 0x50(%rsi)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0xb8(%rsp), %rax
mulq 0x58(%rsi)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
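// Modular subtractions: (%rsp)-0x40 -> (%rsp), then 0x80-(%rsp) -> 0x80(%rsp)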
movq (%rsp), %rax
subq 0x40(%rsp), %rax
movq 0x8(%rsp), %rcx
sbbq 0x48(%rsp), %rcx
movq 0x10(%rsp), %r8
sbbq 0x50(%rsp), %r8
movq 0x18(%rsp), %r9
sbbq 0x58(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, (%rsp)
adcq %r10, %rcx
movq %rcx, 0x8(%rsp)
adcq %r11, %r8
movq %r8, 0x10(%rsp)
adcq %rdx, %r9
movq %r9, 0x18(%rsp)
movq 0x80(%rsp), %rax
subq (%rsp), %rax
movq 0x88(%rsp), %rcx
sbbq 0x8(%rsp), %rcx
movq 0x90(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0x98(%rsp), %r9
sbbq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x80(%rsp)
adcq %r10, %rcx
movq %rcx, 0x88(%rsp)
adcq %r11, %r8
movq %r8, 0x90(%rsp)
adcq %rdx, %r9
movq %r9, 0x98(%rsp)
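// montmul: 0x60(%rsp) * 0xc0(%rsp) -> 0x60(%rsp)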
movq 0x60(%rsp), %rax
mulq 0xc0(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0x60(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x68(%rsp), %rax
mulq 0xc0(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0x60(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x68(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x70(%rsp), %rax
mulq 0xc0(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0x60(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x68(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x70(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x78(%rsp), %rax
mulq 0xc0(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x68(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x70(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x78(%rsp), %rax
mulq 0xc8(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x70(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x78(%rsp), %rax
mulq 0xd0(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x78(%rsp), %rax
mulq 0xd8(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
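// montmul: 0xa0(%rsp) * z_2 (at 0x40(%rbp)) -> 0xa0(%rsp), the z coordinate
// of the generic sum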
movq 0xa0(%rsp), %rax
mulq 0x40(%rbp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0xa0(%rsp), %rax
mulq 0x48(%rbp)
addq %rax, %r9
adcq %rdx, %r10
movq 0xa8(%rsp), %rax
mulq 0x40(%rbp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0xa0(%rsp), %rax
mulq 0x50(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0xa8(%rsp), %rax
mulq 0x48(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0xb0(%rsp), %rax
mulq 0x40(%rbp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0xa0(%rsp), %rax
mulq 0x58(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0xa8(%rsp), %rax
mulq 0x50(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb0(%rsp), %rax
mulq 0x48(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb8(%rsp), %rax
mulq 0x40(%rbp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0xa8(%rsp), %rax
mulq 0x58(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0xb0(%rsp), %rax
mulq 0x50(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0xb8(%rsp), %rax
mulq 0x48(%rbp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0xb0(%rsp), %rax
mulq 0x58(%rbp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0xb8(%rsp), %rax
mulq 0x50(%rbp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0xb8(%rsp), %rax
mulq 0x58(%rbp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
movq 0x20(%rsp), %rax
mulq 0x80(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0x20(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x28(%rsp), %rax
mulq 0x80(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0x20(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x28(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x30(%rsp), %rax
mulq 0x80(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0x20(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x28(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x30(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x38(%rsp), %rax
mulq 0x80(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x28(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x30(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x38(%rsp), %rax
mulq 0x88(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x30(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x38(%rsp), %rax
mulq 0x90(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x38(%rsp), %rax
mulq 0x98(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
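// Modular subtraction: 0x80-0x60 -> 0x80(%rsp), the y coordinate of the
// generic sum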
movq 0x80(%rsp), %rax
subq 0x60(%rsp), %rax
movq 0x88(%rsp), %rcx
sbbq 0x68(%rsp), %rcx
movq 0x90(%rsp), %r8
sbbq 0x70(%rsp), %r8
movq 0x98(%rsp), %r9
sbbq 0x78(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x80(%rsp)
adcq %r10, %rcx
movq %rcx, 0x88(%rsp)
adcq %r11, %r8
movq %r8, 0x90(%rsp)
adcq %rdx, %r9
movq %r9, 0x98(%rsp)
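// Handle the exceptional cases: %rax and %rbx become all-ones masks for
// z_1 != 0 and z_2 != 0 respectively. When only z_2 is zero (cmovb) return
// the first point, when only z_1 is zero (cmova) return the second, and
// otherwise (cmove/default) keep the computed sum from (%rsp), 0x80(%rsp)
// and 0xa0(%rsp).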
movq 0x40(%rsi), %r8
movq 0x48(%rsi), %r9
movq 0x50(%rsi), %r10
movq 0x58(%rsi), %r11
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
movq 0x40(%rbp), %r12
movq 0x48(%rbp), %r13
movq 0x50(%rbp), %r14
movq 0x58(%rbp), %r15
movq %r12, %rbx
movq %r13, %rdx
orq %r14, %rbx
orq %r15, %rdx
orq %rdx, %rbx
negq %rbx
sbbq %rbx, %rbx
cmpq %rax, %rbx
cmovbq %r8, %r12
cmovbq %r9, %r13
cmovbq %r10, %r14
cmovbq %r11, %r15
cmoveq 0xa0(%rsp), %r12
cmoveq 0xa8(%rsp), %r13
cmoveq 0xb0(%rsp), %r14
cmoveq 0xb8(%rsp), %r15
movq (%rsp), %rax
cmovbq (%rsi), %rax
cmova 0x0(%rbp), %rax
movq 0x8(%rsp), %rbx
cmovbq 0x8(%rsi), %rbx
cmova 0x8(%rbp), %rbx
movq 0x10(%rsp), %rcx
cmovbq 0x10(%rsi), %rcx
cmova 0x10(%rbp), %rcx
movq 0x18(%rsp), %rdx
cmovbq 0x18(%rsi), %rdx
cmova 0x18(%rbp), %rdx
movq 0x80(%rsp), %r8
cmovbq 0x20(%rsi), %r8
cmova 0x20(%rbp), %r8
movq 0x88(%rsp), %r9
cmovbq 0x28(%rsi), %r9
cmova 0x28(%rbp), %r9
movq 0x90(%rsp), %r10
cmovbq 0x30(%rsi), %r10
cmova 0x30(%rbp), %r10
movq 0x98(%rsp), %r11
cmovbq 0x38(%rsi), %r11
cmova 0x38(%rbp), %r11
movq %rax, (%rdi)
movq %rbx, 0x8(%rdi)
movq %rcx, 0x10(%rdi)
movq %rdx, 0x18(%rdi)
movq %r8, 0x20(%rdi)
movq %r9, 0x28(%rdi)
movq %r10, 0x30(%rdi)
movq %r11, 0x38(%rdi)
movq %r12, 0x40(%rdi)
movq %r13, 0x48(%rdi)
movq %r14, 0x50(%rdi)
movq %r15, 0x58(%rdi)
CFI_INC_RSP(224)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjdouble)
Lsm2_montjscalarmul_alt_sm2_montjdouble:
CFI_START
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(192)
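// Local point doubling. Input Jacobian triple: x_1 = (%rsi),
// y_1 = 0x20(%rsi), z_1 = 0x40(%rsi); output triple written to (%rdi).
// First: montsqr, z_1^2 -> (%rsp).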
movq 0x40(%rsi), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x48(%rsi), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x58(%rsi), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x50(%rsi), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x40(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x48(%rsi), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x58(%rsi), %rbx
movq 0x48(%rsi), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x48(%rsi), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x50(%rsi), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x58(%rsi), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
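// montsqr: y_1^2 -> 0x20(%rsp)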
movq 0x20(%rsi), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x28(%rsi), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x38(%rsi), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x30(%rsi), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x20(%rsi), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x28(%rsi), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x38(%rsi), %rbx
movq 0x28(%rsi), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x28(%rsi), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x30(%rsi), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x38(%rsi), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x20(%rsp)
movq %r13, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
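// Modular subtraction: x_1 - z_1^2 -> 0x60(%rsp)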
movq (%rsi), %rax
subq (%rsp), %rax
movq 0x8(%rsi), %rcx
sbbq 0x8(%rsp), %rcx
movq 0x10(%rsi), %r8
sbbq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
sbbq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x60(%rsp)
adcq %r10, %rcx
movq %rcx, 0x68(%rsp)
adcq %r11, %r8
movq %r8, 0x70(%rsp)
adcq %rdx, %r9
movq %r9, 0x78(%rsp)
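// Modular addition: x_1 + z_1^2 -> 0x40(%rsp) (add, then subtract p_sm2
// back under the carry mask)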
movq (%rsi), %rax
addq (%rsp), %rax
movq 0x8(%rsi), %rcx
adcq 0x8(%rsp), %rcx
movq 0x10(%rsi), %r8
adcq 0x10(%rsp), %r8
movq 0x18(%rsi), %r9
adcq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
subq %r11, %rax
movq %rax, 0x40(%rsp)
sbbq %r10, %rcx
movq %rcx, 0x48(%rsp)
sbbq %r11, %r8
movq %r8, 0x50(%rsp)
sbbq %rdx, %r9
movq %r9, 0x58(%rsp)
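// montmul: (x_1 + z_1^2) * (x_1 - z_1^2) = x_1^2 - z_1^4 -> 0x60(%rsp)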
movq 0x40(%rsp), %rax
mulq 0x60(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0x40(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x48(%rsp), %rax
mulq 0x60(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0x40(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x48(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x50(%rsp), %rax
mulq 0x60(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0x40(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x48(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x50(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x58(%rsp), %rax
mulq 0x60(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x48(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x50(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x58(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x50(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x58(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x58(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
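// Fully reduced modular addition: y_1 + z_1 -> 0x40(%rsp) (add, subtract
// p_sm2, then re-add it under the borrow mask)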
xorq %r11, %r11
movq 0x20(%rsi), %rax
addq 0x40(%rsi), %rax
movq 0x28(%rsi), %rcx
adcq 0x48(%rsi), %rcx
movq 0x30(%rsi), %r8
adcq 0x50(%rsi), %r8
movq 0x38(%rsi), %r9
adcq 0x58(%rsi), %r9
adcq %r11, %r11
subq $0xffffffffffffffff, %rax
movabsq $0xffffffff00000000, %r10
sbbq %r10, %rcx
sbbq $0xffffffffffffffff, %r8
movabsq $0xfffffffeffffffff, %rdx
sbbq %rdx, %r9
sbbq $0x0, %r11
andq %r11, %r10
andq %r11, %rdx
addq %r11, %rax
movq %rax, 0x40(%rsp)
adcq %r10, %rcx
movq %rcx, 0x48(%rsp)
adcq %r11, %r8
movq %r8, 0x50(%rsp)
adcq %rdx, %r9
movq %r9, 0x58(%rsp)
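// montmul: x_1 * y_1^2 -> 0x80(%rsp)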
movq (%rsi), %rax
mulq 0x20(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq (%rsi), %rax
mulq 0x28(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0x8(%rsi), %rax
mulq 0x20(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq (%rsi), %rax
mulq 0x30(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0x8(%rsi), %rax
mulq 0x28(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0x10(%rsi), %rax
mulq 0x20(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq (%rsi), %rax
mulq 0x38(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0x8(%rsi), %rax
mulq 0x30(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x10(%rsi), %rax
mulq 0x28(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0x18(%rsi), %rax
mulq 0x20(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0x8(%rsi), %rax
mulq 0x38(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0x10(%rsi), %rax
mulq 0x30(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0x18(%rsi), %rax
mulq 0x28(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0x10(%rsi), %rax
mulq 0x38(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0x18(%rsi), %rax
mulq 0x30(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0x18(%rsi), %rax
mulq 0x38(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x80(%rsp)
movq %r13, 0x88(%rsp)
movq %r14, 0x90(%rsp)
movq %r15, 0x98(%rsp)
movq 0x60(%rsp), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x68(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x78(%rsp), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x70(%rsp), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x60(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x68(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x78(%rsp), %rbx
movq 0x68(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x68(%rsp), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x70(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x78(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0xa0(%rsp)
movq %r13, 0xa8(%rsp)
movq %r14, 0xb0(%rsp)
movq %r15, 0xb8(%rsp)
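// montsqr: (y_1 + z_1)^2 -> 0x40(%rsp)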
movq 0x40(%rsp), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x48(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x58(%rsp), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x50(%rsp), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x40(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x48(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x58(%rsp), %rbx
movq 0x48(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x48(%rsp), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x50(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x58(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x40(%rsp)
movq %r13, 0x48(%rsp)
movq %r14, 0x50(%rsp)
movq %r15, 0x58(%rsp)
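// Compute 12 * (x_1 * y_1^2) - 9 * (x_1^2 - z_1^4)^2 mod p_sm2 -> 0xa0(%rsp):
// 9 * (p_sm2 - 0xa0(%rsp)) plus 12 * 0x80(%rsp), followed by a single
// quotient-estimate correction (the leaq 0x1(%r12) below).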
movq $0xffffffffffffffff, %r9
movq %r9, %r11
subq 0xa0(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq 0xa8(%rsp), %r10
sbbq 0xb0(%rsp), %r11
movabsq $0xfffffffeffffffff, %r12
sbbq 0xb8(%rsp), %r12
movq $0x9, %rcx
movq %r9, %rax
mulq %rcx
movq %rax, %r8
movq %rdx, %r9
movq %r10, %rax
xorl %r10d, %r10d
mulq %rcx
addq %rax, %r9
adcq %rdx, %r10
movq %r11, %rax
xorl %r11d, %r11d
mulq %rcx
addq %rax, %r10
adcq %rdx, %r11
movq %r12, %rax
xorl %r12d, %r12d
mulq %rcx
addq %rax, %r11
adcq %rdx, %r12
movl $0xc, %ecx
movq 0x80(%rsp), %rax
mulq %rcx
addq %rax, %r8
adcq %rdx, %r9
sbbq %rbx, %rbx
movq 0x88(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %rbx, %rbx
movq 0x90(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbx, %rbx
movq 0x98(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r11
adcq %rdx, %r12
leaq 0x1(%r12), %rdx
movq %rdx, %rax
shlq $0x20, %rax
movq %rax, %rcx
subq %rdx, %rax
addq %rdx, %r8
adcq %rax, %r9
adcq $0x0, %r10
adcq %rcx, %r11
sbbq %rdx, %rdx
notq %rdx
movabsq $0xffffffff00000000, %rax
andq %rdx, %rax
movq %rdx, %rcx
btr $0x20, %rcx
addq %rdx, %r8
movq %r8, 0xa0(%rsp)
adcq %rax, %r9
movq %r9, 0xa8(%rsp)
adcq %rdx, %r10
movq %r10, 0xb0(%rsp)
adcq %rcx, %r11
movq %r11, 0xb8(%rsp)
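// Modular subtraction: (y_1 + z_1)^2 - z_1^2 -> 0x40(%rsp)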
movq 0x40(%rsp), %rax
subq (%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x8(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x10(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x18(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x40(%rsp)
adcq %r10, %rcx
movq %rcx, 0x48(%rsp)
adcq %r11, %r8
movq %r8, 0x50(%rsp)
adcq %rdx, %r9
movq %r9, 0x58(%rsp)
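// montsqr: (y_1^2)^2 = y_1^4 -> (%rsp)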
movq 0x20(%rsp), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 0x28(%rsp), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 0x38(%rsp), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 0x30(%rsp), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
movq 0x20(%rsp), %rax
mulq %rbx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rcx, %rcx
movq 0x28(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r11
adcq %rdx, %r12
sbbq %rcx, %rcx
movq 0x38(%rsp), %rbx
movq 0x28(%rsp), %rax
mulq %rbx
subq %rcx, %rdx
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
movq 0x28(%rsp), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 0x30(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 0x38(%rsp), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, (%rsp)
movq %r13, 0x8(%rsp)
movq %r14, 0x10(%rsp)
movq %r15, 0x18(%rsp)
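// montmul: 0xa0(%rsp) * (x_1^2 - z_1^4) -> 0x60(%rsp)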
movq 0xa0(%rsp), %rax
mulq 0x60(%rsp)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
xorq %r11, %r11
movq 0xa0(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r9
adcq %rdx, %r10
movq 0xa8(%rsp), %rax
mulq 0x60(%rsp)
addq %rax, %r9
adcq %rdx, %r10
adcq %r11, %r11
xorq %r12, %r12
movq 0xa0(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq %r12, %r12
movq 0xa8(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
movq 0xb0(%rsp), %rax
mulq 0x60(%rsp)
addq %rax, %r10
adcq %rdx, %r11
adcq $0x0, %r12
xorq %r13, %r13
movq 0xa0(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq %r13, %r13
movq 0xa8(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb0(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
movq 0xb8(%rsp), %rax
mulq 0x60(%rsp)
addq %rax, %r11
adcq %rdx, %r12
adcq $0x0, %r13
xorq %r14, %r14
movq 0xa8(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq %r14, %r14
movq 0xb0(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
movq 0xb8(%rsp), %rax
mulq 0x68(%rsp)
addq %rax, %r12
adcq %rdx, %r13
adcq $0x0, %r14
xorq %r15, %r15
movq 0xb0(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq %r15, %r15
movq 0xb8(%rsp), %rax
mulq 0x70(%rsp)
addq %rax, %r13
adcq %rdx, %r14
adcq $0x0, %r15
movq 0xb8(%rsp), %rax
mulq 0x78(%rsp)
addq %rax, %r14
adcq %rdx, %r15
movq %r8, %rax
shlq $0x20, %rax
movq %r8, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r8, %rax
sbbq $0x0, %rcx
subq %rax, %r9
sbbq %rcx, %r10
sbbq %rdx, %r11
sbbq %rbx, %r8
movq %r9, %rax
shlq $0x20, %rax
movq %r9, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r9, %rax
sbbq $0x0, %rcx
subq %rax, %r10
sbbq %rcx, %r11
sbbq %rdx, %r8
sbbq %rbx, %r9
movq %r10, %rax
shlq $0x20, %rax
movq %r10, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r10, %rax
sbbq $0x0, %rcx
subq %rax, %r11
sbbq %rcx, %r8
sbbq %rdx, %r9
sbbq %rbx, %r10
movq %r11, %rax
shlq $0x20, %rax
movq %r11, %rcx
shrq $0x20, %rcx
movq %rax, %rdx
movq %rcx, %rbx
subq %r11, %rax
sbbq $0x0, %rcx
subq %rax, %r8
sbbq %rcx, %r9
sbbq %rdx, %r10
sbbq %rbx, %r11
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
movl $0x1, %ecx
movl $0xffffffff, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 0x1(%rdx), %r11
adcq %r13, %rdx
leaq -0x1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
cmovbq %rcx, %r12
cmovbq %rdx, %r13
cmovbq %rbx, %r14
cmovbq %r11, %r15
movq %r12, 0x60(%rsp)
movq %r13, 0x68(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x78(%rsp)
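// z_3 = (y_1 + z_1)^2 - z_1^2 - y_1^2 (= 2 * y_1 * z_1), stored at 0x40(%rdi)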
movq 0x40(%rsp), %rax
subq 0x20(%rsp), %rax
movq 0x48(%rsp), %rcx
sbbq 0x28(%rsp), %rcx
movq 0x50(%rsp), %r8
sbbq 0x30(%rsp), %r8
movq 0x58(%rsp), %r9
sbbq 0x38(%rsp), %r9
movabsq $0xffffffff00000000, %r10
sbbq %r11, %r11
andq %r11, %r10
movq %r11, %rdx
btr $0x20, %rdx
addq %r11, %rax
movq %rax, 0x40(%rdi)
adcq %r10, %rcx
movq %rcx, 0x48(%rdi)
adcq %r11, %r8
movq %r8, 0x50(%rdi)
adcq %rdx, %r9
movq %r9, 0x58(%rdi)
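// x_3 = 4 * (x_1 * y_1^2) - 0xa0(%rsp) mod p_sm2, stored at (%rdi):
// the shldq $0x2 chain forms the quadruple before the subtraction.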
movq 0x98(%rsp), %r11
movq %r11, %rdx
movq 0x90(%rsp), %r10
shldq $0x2, %r10, %r11
movq 0x88(%rsp), %r9
shldq $0x2, %r9, %r10
movq 0x80(%rsp), %r8
shldq $0x2, %r8, %r9
shlq $0x2, %r8
shrq $0x3e, %rdx
addq $0x1, %rdx
subq 0xa0(%rsp), %r8
sbbq 0xa8(%rsp), %r9
sbbq 0xb0(%rsp), %r10
sbbq 0xb8(%rsp), %r11
sbbq $0x0, %rdx
movq %rdx, %rax
shlq $0x20, %rax
movq %rax, %rcx
subq %rdx, %rax
addq %rdx, %r8
adcq %rax, %r9
adcq $0x0, %r10
adcq %rcx, %r11
sbbq %rdx, %rdx
notq %rdx
movabsq $0xffffffff00000000, %rax
andq %rdx, %rax
movq %rdx, %rcx
btr $0x20, %rcx
addq %rdx, %r8
movq %r8, (%rdi)
adcq %rax, %r9
movq %r9, 0x8(%rdi)
adcq %rdx, %r10
movq %r10, 0x10(%rdi)
adcq %rcx, %r11
movq %r11, 0x18(%rdi)
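// y_3 = 3 * 0x60(%rsp) - 8 * y_1^4 mod p_sm2, stored at 0x20(%rdi):
// 8 * (p_sm2 - y_1^4) via the shldq $0x3 chain, plus 3 * 0x60(%rsp),
// then the same quotient-estimate correction.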
movq $0xffffffffffffffff, %r8
movq %r8, %r10
subq (%rsp), %r8
movabsq $0xffffffff00000000, %r9
sbbq 0x8(%rsp), %r9
sbbq 0x10(%rsp), %r10
movabsq $0xfffffffeffffffff, %r11
sbbq 0x18(%rsp), %r11
movq %r11, %r12
shldq $0x3, %r10, %r11
shldq $0x3, %r9, %r10
shldq $0x3, %r8, %r9
shlq $0x3, %r8
shrq $0x3d, %r12
movl $0x3, %ecx
movq 0x60(%rsp), %rax
mulq %rcx
addq %rax, %r8
adcq %rdx, %r9
sbbq %rbx, %rbx
movq 0x68(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r9
adcq %rdx, %r10
sbbq %rbx, %rbx
movq 0x70(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r10
adcq %rdx, %r11
sbbq %rbx, %rbx
movq 0x78(%rsp), %rax
mulq %rcx
subq %rbx, %rdx
addq %rax, %r11
adcq %rdx, %r12
leaq 0x1(%r12), %rdx
movq %rdx, %rax
shlq $0x20, %rax
movq %rax, %rcx
subq %rdx, %rax
addq %rdx, %r8
adcq %rax, %r9
adcq $0x0, %r10
adcq %rcx, %r11
sbbq %rdx, %rdx
notq %rdx
movabsq $0xffffffff00000000, %rax
andq %rdx, %rax
movq %rdx, %rcx
btr $0x20, %rcx
addq %rdx, %r8
movq %r8, 0x20(%rdi)
adcq %rax, %r9
movq %r9, 0x28(%rdi)
adcq %rdx, %r10
movq %r10, 0x30(%rdi)
adcq %rcx, %r11
movq %r11, 0x38(%rdi)
CFI_INC_RSP(192)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
CFI_RET
S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_mod_nsm2_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_sm2
// Input x[4]; output z[4]
//
// extern void bignum_mod_nsm2_4(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Reduction is modulo the group order of the GM/T 0003-2012 curve SM2.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_nsm2_4)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_nsm2_4)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_nsm2_4)
.text
#define z %rdi
#define x %rsi
#define d0 %rdx
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define n0 %rax
#define n1 %r10
#define n3 %r11
// Can re-use this as a temporary once we've loaded the input
#define c %rsi
S2N_BN_SYMBOL(bignum_mod_nsm2_4):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load a set of registers [n3; 0; n1; n0] = 2^256 - n_sm2
movq $0xac440bf6c62abedd, n0
movq $0x8dfc2094de39fad4, n1
movq $0x0000000100000000, n3
// Load the input and compute x + (2^256 - n_sm2)
movq (x), d0
addq n0, d0
movq 8(x), d1
adcq n1, d1
movq 16(x), d2
adcq $0, d2
movq 24(x), d3
adcq n3, d3
// Now CF is set iff 2^256 <= x + (2^256 - n_sm2), i.e. iff n_sm2 <= x.
// Create a mask for the condition x < n, and mask the three nontrivial digits
// ready to undo the previous addition with a compensating subtraction
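// (Concretely: let t = x + (2^256 - n_sm2). If the addition carries out of
// 2^256, then n_sm2 <= x and the retained low 256 bits of t are exactly
// x - n_sm2, already the answer; if it does not carry, then x < n_sm2 was
// already reduced, so subtracting the same constant back restores x.)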
sbbq c, c
notq c
andq c, n0
andq c, n1
andq c, n3
// Now subtract mask * (2^256 - n_sm2) again and store
subq n0, d0
movq d0, (z)
sbbq n1, d1
movq d1, 8(z)
sbbq $0, d2
movq d2, 16(z)
sbbq n3, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_nsm2_4)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates
//
// extern void sm2_montjmixadd_alt(uint64_t p3[static 12],
// const uint64_t p1[static 12],
// const uint64_t p2[static 8]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be 1 (the identity).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjmixadd_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjmixadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjmixadd_alt)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// which needs to be set up explicitly before use.
// By design, none of the code macros modify any of
// these, so we maintain the assignments throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define zp2 (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define NSPACE NUMSIZE*6
// Corresponds to bignum_montmul_sm2_alt except for registers
#define montmul_sm2(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds to bignum_montsqr_sm2_alt except for registers
#define montsqr_sm2(P0,P1) \
movq P1, %rax ; \
movq %rax, %rbx ; \
mulq %rax; \
movq %rax, %r8 ; \
movq %rdx, %r15 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
movq %rax, %r9 ; \
movq %rdx, %r10 ; \
movq 0x18+P1, %rax ; \
movq %rax, %r13 ; \
mulq %rbx; \
movq %rax, %r11 ; \
movq %rdx, %r12 ; \
movq 0x10+P1, %rax ; \
movq %rax, %rbx ; \
mulq %r13; \
movq %rax, %r13 ; \
movq %rdx, %r14 ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rcx, %rcx ; \
movq 0x18+P1, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorl %ecx, %ecx ; \
addq %r9, %r9 ; \
adcq %r10, %r10 ; \
adcq %r11, %r11 ; \
adcq %r12, %r12 ; \
adcq %r13, %r13 ; \
adcq %r14, %r14 ; \
adcq %rcx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %r15, %r9 ; \
adcq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
negq %r15; \
adcq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
negq %r15; \
adcq %rax, %r14 ; \
adcq %rcx, %rdx ; \
movq %rdx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds exactly to bignum_sub_sm2
#define sub_sm2(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rcx ; \
sbbq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// Additional macros to help with final multiplexing
#define testzero4(P) \
movq P, %rax ; \
movq 8+P, %rdx ; \
orq 16+P, %rax ; \
orq 24+P, %rdx ; \
orq %rdx, %rax
#define mux4(r0,r1,r2,r3,PNE,PEQ) \
movq PNE, r0 ; \
movq PEQ, %rax ; \
cmovzq %rax, r0 ; \
movq 8+PNE, r1 ; \
movq 8+PEQ, %rax ; \
cmovzq %rax, r1 ; \
movq 16+PNE, r2 ; \
movq 16+PEQ, %rax ; \
cmovzq %rax, r2 ; \
movq 24+PNE, r3 ; \
movq 24+PEQ, %rax ; \
cmovzq %rax, r3
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
S2N_BN_SYMBOL(sm2_montjmixadd_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input y in %rbp where it lasts throughout the main code.
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
// 8 * multiply + 3 * square + 7 * subtract
montsqr_sm2(zp2,z_1)
montmul_sm2(y2a,z_1,y_2)
montmul_sm2(x2a,zp2,x_2)
montmul_sm2(y2a,zp2,y2a)
sub_sm2(xd,x2a,x_1)
sub_sm2(yd,y2a,y_1)
montsqr_sm2(zz,xd)
montsqr_sm2(ww,yd)
montmul_sm2(zzx1,zz,x_1)
montmul_sm2(zzx2,zz,x2a)
sub_sm2(resx,ww,zzx1)
sub_sm2(t1,zzx2,zzx1)
montmul_sm2(resz,xd,z_1)
sub_sm2(resx,resx,zzx2)
sub_sm2(t2,zzx1,resx)
montmul_sm2(t1,t1,y_1)
montmul_sm2(t2,yd,t2)
sub_sm2(resy,t2,t1)
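// (This is the standard Jacobian mixed addition: x2a = x_2 * z_1^2 and
// y2a = y_2 * z_1^3 scale p2 into p1's frame, xd = x2a - x_1 and
// yd = y2a - y_1, and then x_3 = yd^2 - xd^3 - 2 * x_1 * xd^2,
// y_3 = yd * (x_1 * xd^2 - x_3) - y_1 * xd^3 and z_3 = xd * z_1.)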
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
testzero4(z_1)
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in
// Montgomery form so not the simple constant 1 but rather 2^256 - p_sm2),
// hence giving 0 + p2 = p2 for the final result.
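// (The Montgomery form of 1 is 2^256 mod p_sm2 = 2^256 - p_sm2
// = 2^224 + 2^96 - 2^64 + 1, whose four 64-bit digits, from least to most
// significant, are exactly the constants loaded just below.)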
mux4(%r8,%r9,%r10,%r11,resx,x_2)
mux4(%r12,%r13,%r14,%r15,resy,y_2)
store4(x_3,%r8,%r9,%r10,%r11)
store4(y_3,%r12,%r13,%r14,%r15)
load4(%r8,%r9,%r10,%r11,resz)
movl $1, %eax
cmovzq %rax, %r8
movl $0x00000000ffffffff, %eax
cmovzq %rax, %r9
movl $0, %eax
cmovzq %rax, %r10
movq $0x0000000100000000, %rax
cmovzq %rax, %r11
store4(z_3,%r8,%r9,%r10,%r11)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjmixadd_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates
//
// extern void sm2_montjdouble_alt(uint64_t p3[static 12],
// const uint64_t p1[static 12]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjdouble_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjdouble_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjdouble_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1, which is true when the
// arguments come in initially and is not disturbed throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z2 (NUMSIZE*0)(%rsp)
#define y4 (NUMSIZE*0)(%rsp)
#define y2 (NUMSIZE*1)(%rsp)
#define t1 (NUMSIZE*2)(%rsp)
#define t2 (NUMSIZE*3)(%rsp)
#define x2p (NUMSIZE*3)(%rsp)
#define dx2 (NUMSIZE*3)(%rsp)
#define xy2 (NUMSIZE*4)(%rsp)
#define x4p (NUMSIZE*5)(%rsp)
#define d (NUMSIZE*5)(%rsp)
#define NSPACE NUMSIZE*6
// Corresponds to bignum_montmul_sm2_alt except for registers
#define montmul_sm2(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds to bignum_montsqr_sm2_alt except for registers
#define montsqr_sm2(P0,P1) \
movq P1, %rax ; \
movq %rax, %rbx ; \
mulq %rax; \
movq %rax, %r8 ; \
movq %rdx, %r15 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
movq %rax, %r9 ; \
movq %rdx, %r10 ; \
movq 0x18+P1, %rax ; \
movq %rax, %r13 ; \
mulq %rbx; \
movq %rax, %r11 ; \
movq %rdx, %r12 ; \
movq 0x10+P1, %rax ; \
movq %rax, %rbx ; \
mulq %r13; \
movq %rax, %r13 ; \
movq %rdx, %r14 ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rcx, %rcx ; \
movq 0x18+P1, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorl %ecx, %ecx ; \
addq %r9, %r9 ; \
adcq %r10, %r10 ; \
adcq %r11, %r11 ; \
adcq %r12, %r12 ; \
adcq %r13, %r13 ; \
adcq %r14, %r14 ; \
adcq %rcx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %r15, %r9 ; \
adcq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
negq %r15; \
adcq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
negq %r15; \
adcq %rax, %r14 ; \
adcq %rcx, %rdx ; \
movq %rdx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds exactly to bignum_sub_sm2
#define sub_sm2(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rcx ; \
sbbq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// Corresponds exactly to bignum_add_sm2
#define add_sm2(P0,P1,P2) \
xorq %r11, %r11 ; \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
adcq %r11, %r11 ; \
subq $0xffffffffffffffff, %rax ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r10, %rcx ; \
sbbq $0xffffffffffffffff, %r8 ; \
movq $0xfffffffeffffffff, %rdx ; \
sbbq %rdx, %r9 ; \
sbbq $0x0, %r11 ; \
andq %r11, %r10 ; \
andq %r11, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// A weak version of add that only guarantees the sum fits in 4 digits; the
// result is correct modulo p_sm2 but is not necessarily fully reduced
#define weakadd_sm2(P0,P1,P2) \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
subq %r11, %rax ; \
movq %rax, P0 ; \
sbbq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
sbbq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x18+P0
// P0 = C * P1 - D * P2, computed as D * (p_sm2 - P2) + C * P1
// Quotient estimation is done just as q = h + 1 as in bignum_triple_sm2
// This also applies to the other functions following.
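// (The h + 1 trick: after forming 2^256 + C * P1 + D * (p_sm2 - P2), the
// top digit h overestimates the true quotient by at most one, so reducing
// by (h + 1) * p_sm2 leaves a remainder in (-p_sm2, p_sm2); the masked
// addition in the tail restores p_sm2 exactly when that remainder is
// negative.)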
#define cmsub_sm2(P0,C,P1,D,P2) \
/* First (%r12;%r11;%r10;%r9) = p_sm2 - P2 */ \
movq $0xffffffffffffffff, %r9 ; \
movq %r9, %r11 ; \
subq P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq 0x8+P2, %r10 ; \
sbbq 0x10+P2, %r11 ; \
movq $0xfffffffeffffffff, %r12 ; \
sbbq 0x18+P2, %r12 ; \
/* (%r12;%r11;%r10;%r9;%r8) = D * (p_sm2 - P2) */ \
movq $D, %rcx ; \
movq %r9, %rax ; \
mulq %rcx; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
movq %r10, %rax ; \
xorl %r10d, %r10d ; \
mulq %rcx; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq %r11, %rax ; \
xorl %r11d, %r11d ; \
mulq %rcx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
movq %r12, %rax ; \
xorl %r12d, %r12d ; \
mulq %rcx; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
/* (%rdx;%r11;%r10;%r9;%r8) = 2^256 + C * P1 + D * (p_sm2 - P2) */ \
movl $C, %ecx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbx, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbx, %rbx ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbx, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
leaq 1(%r12), %rdx ; \
/* Now the tail for modular reduction from tripling */ \
movq %rdx, %rax ; \
shlq $0x20, %rax ; \
movq %rax, %rcx ; \
subq %rdx, %rax ; \
addq %rdx, %r8 ; \
adcq %rax, %r9 ; \
adcq $0x0, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rdx, %rdx ; \
notq %rdx; \
movq $0xffffffff00000000, %rax ; \
andq %rdx, %rax ; \
movq %rdx, %rcx ; \
btr $0x20, %rcx ; \
addq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
adcq %rdx, %r10 ; \
movq %r10, 0x10+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 0x18+P0
// P0 = 3 * P1 - 8 * P2, computed as (p_sm2 - P2) << 3 + 3 * P1
#define cmsub38_sm2(P0,P1,P2) \
/* First (%r11;%r10;%r9;%r8) = p_sm2 - P2 */ \
movq $0xffffffffffffffff, %r8 ; \
movq %r8, %r10 ; \
subq P2, %r8 ; \
movq $0xffffffff00000000, %r9 ; \
sbbq 0x8+P2, %r9 ; \
sbbq 0x10+P2, %r10 ; \
movq $0xfffffffeffffffff, %r11 ; \
sbbq 0x18+P2, %r11 ; \
/* (%r12;%r11;%r10;%r9;%r8) = (p_sm2 - P2) << 3 */ \
movq %r11, %r12 ; \
shldq $3, %r10, %r11 ; \
shldq $3, %r9, %r10 ; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
shrq $61, %r12 ; \
/* (%rdx;%r11;%r10;%r9;%r8) = 2^256 + 3 * P1 + 8 * (p_sm2 - P2) */ \
movl $3, %ecx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbx, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbx, %rbx ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbx, %rbx ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
leaq 1(%r12), %rdx ; \
/* Now the tail for modular reduction from tripling */ \
movq %rdx, %rax ; \
shlq $0x20, %rax ; \
movq %rax, %rcx ; \
subq %rdx, %rax ; \
addq %rdx, %r8 ; \
adcq %rax, %r9 ; \
adcq $0x0, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rdx, %rdx ; \
notq %rdx; \
movq $0xffffffff00000000, %rax ; \
andq %rdx, %rax ; \
movq %rdx, %rcx ; \
btr $0x20, %rcx ; \
addq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
adcq %rdx, %r10 ; \
movq %r10, 0x10+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 0x18+P0
// P0 = 4 * P1 - P2, by direct subtraction of P2,
// since the quotient estimate still works safely
// for initial value > -p_sm2
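// (The shifted 4 * P1 occupies five digits with top digit < 4; subtracting
// P2 may make the value slightly negative, but for the operand ranges used
// here it stays above -p_sm2, which is still within the range where the
// q = h + 1 estimate, adjusted by the final borrow, remains valid.)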
#define cmsub41_sm2(P0,P1,P2) \
movq 0x18+P1, %r11 ; \
movq %r11, %rdx ; \
movq 0x10+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 0x8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
shrq $62, %rdx ; \
addq $1, %rdx ; \
subq P2, %r8 ; \
sbbq 0x8+P2, %r9 ; \
sbbq 0x10+P2, %r10 ; \
sbbq 0x18+P2, %r11 ; \
sbbq $0, %rdx ; \
/* Now the tail for modular reduction from tripling */ \
movq %rdx, %rax ; \
shlq $0x20, %rax ; \
movq %rax, %rcx ; \
subq %rdx, %rax ; \
addq %rdx, %r8 ; \
adcq %rax, %r9 ; \
adcq $0x0, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rdx, %rdx ; \
notq %rdx; \
movq $0xffffffff00000000, %rax ; \
andq %rdx, %rax ; \
movq %rdx, %rcx ; \
btr $0x20, %rcx ; \
addq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
adcq %rdx, %r10 ; \
movq %r10, 0x10+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 0x18+P0
S2N_BN_SYMBOL(sm2_montjdouble_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Main code, just a sequence of basic field operations
// z2 = z^2
// y2 = y^2
montsqr_sm2(z2,z_1)
montsqr_sm2(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
sub_sm2(t2,x_1,z2)
weakadd_sm2(t1,x_1,z2)
montmul_sm2(x2p,t1,t2)
// t1 = y + z
// xy2 = x * y^2
// x4p = x2p^2
add_sm2(t1,y_1,z_1)
montmul_sm2(xy2,x_1,y2)
montsqr_sm2(x4p,x2p)
// t1 = (y + z)^2
montsqr_sm2(t1,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_sm2(d,12,xy2,9,x4p)
sub_sm2(t1,t1,z2)
// y4 = y^4
montsqr_sm2(y4,y2)
// dx2 = d * x2p
montmul_sm2(dx2,d,x2p)
// z_3' = 2 * y * z
sub_sm2(z_3,t1,y2)
// x' = 4 * xy2 - d
cmsub41_sm2(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_sm2(y_3,dx2,y4)
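// (In the usual Jacobian doubling notation m = 3 * (x^2 - z^4) and
// s = 4 * x * y^2, we have x2p = m / 3 and xy2 = s / 4, so
// x' = 4 * xy2 - d = m^2 - 2 * s and
// y' = 3 * dx2 - 8 * y4 = m * (s - x') - 8 * y^4, the standard formulas.)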
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjdouble_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates
//
// extern void sm2_montjdouble(uint64_t p3[static 12],
// const uint64_t p1[static 12]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjdouble)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjdouble)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1, which is true when the
// arguments come in initially and is not disturbed throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z2 (NUMSIZE*0)(%rsp)
#define y4 (NUMSIZE*0)(%rsp)
#define y2 (NUMSIZE*1)(%rsp)
#define t1 (NUMSIZE*2)(%rsp)
#define t2 (NUMSIZE*3)(%rsp)
#define x2p (NUMSIZE*3)(%rsp)
#define dx2 (NUMSIZE*3)(%rsp)
#define xy2 (NUMSIZE*4)(%rsp)
#define x4p (NUMSIZE*5)(%rsp)
#define d (NUMSIZE*5)(%rsp)
#define NSPACE NUMSIZE*6
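// (This file runs the same field-operation sequence as sm2_montjdouble_alt
// above; only the multiply/square kernels differ, using mulx/adcx/adox here
// in place of the legacy mulq chains of the _alt variant.)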
// Corresponds to bignum_montmul_sm2 except for registers
#define montmul_sm2(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rax, %r10 ; \
addq %rax, %r9 ; \
mulxq 0x10+P1, %rax, %r11 ; \
adcq %rax, %r10 ; \
mulxq 0x18+P1, %rax, %r12 ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x8+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rcx, %r13 ; \
xorl %ecx, %ecx ; \
movq 0x10+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq 0x18+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds to bignum_montsqr_sm2 except for registers
#define montsqr_sm2(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
adcxq %rcx, %r13 ; \
adoxq %rcx, %r14 ; \
adcq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rcx, %r15 ; \
adoxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds exactly to bignum_sub_sm2
#define sub_sm2(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rcx ; \
sbbq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// Corresponds exactly to bignum_add_sm2
#define add_sm2(P0,P1,P2) \
xorq %r11, %r11 ; \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
adcq %r11, %r11 ; \
subq $0xffffffffffffffff, %rax ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r10, %rcx ; \
sbbq $0xffffffffffffffff, %r8 ; \
movq $0xfffffffeffffffff, %rdx ; \
sbbq %rdx, %r9 ; \
sbbq $0x0, %r11 ; \
andq %r11, %r10 ; \
andq %r11, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// A weak version of add that only guarantees the sum fits in 4 digits; the
// result is correct modulo p_sm2 but is not necessarily fully reduced
#define weakadd_sm2(P0,P1,P2) \
movq P1, %rax ; \
addq P2, %rax ; \
movq 0x8+P1, %rcx ; \
adcq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
subq %r11, %rax ; \
movq %rax, P0 ; \
sbbq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
sbbq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq %rdx, %r9 ; \
movq %r9, 0x18+P0
// P0 = C * P1 - D * P2, computed as D * (p_sm2 - P2) + C * P1
// Quotient estimation is done just as q = h + 1 as in bignum_triple_sm2
// This also applies to the other functions following.
#define cmsub_sm2(P0,C,P1,D,P2) \
/* First (%r11;%r10;%r9;%r8) = p_sm2 - P2 */ \
movq $0xffffffffffffffff, %r8 ; \
movq %r8, %r10 ; \
subq P2, %r8 ; \
movq $0xffffffff00000000, %r9 ; \
sbbq 0x8+P2, %r9 ; \
sbbq 0x10+P2, %r10 ; \
movq $0xfffffffeffffffff, %r11 ; \
sbbq 0x18+P2, %r11 ; \
/* (%r12;%r11;%r10;%r9;%r8) = D * (p_sm2 - P2) */ \
xorl %r12d, %r12d ; \
movq $D, %rdx ; \
mulxq %r8, %r8, %rax ; \
mulxq %r9, %r9, %rcx ; \
addq %rax, %r9 ; \
mulxq %r10, %r10, %rax ; \
adcq %rcx, %r10 ; \
mulxq %r11, %r11, %rcx ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
/* (%rdx;%r11;%r10;%r9;%r8) = 2^256 + C * P1 + D * (p_sm2 - P2) */ \
movq $C, %rdx ; \
xorl %eax, %eax ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq 0x10+P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 0x18+P1, %rax, %rdx ; \
adcxq %rax, %r11 ; \
adoxq %r12, %rdx ; \
adcq $1, %rdx ; \
/* Now the tail for modular reduction from tripling */ \
movq %rdx, %rax ; \
shlq $0x20, %rax ; \
movq %rax, %rcx ; \
subq %rdx, %rax ; \
addq %rdx, %r8 ; \
adcq %rax, %r9 ; \
adcq $0x0, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rdx, %rdx ; \
notq %rdx; \
movq $0xffffffff00000000, %rax ; \
andq %rdx, %rax ; \
movq %rdx, %rcx ; \
btr $0x20, %rcx ; \
addq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
adcq %rdx, %r10 ; \
movq %r10, 0x10+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 0x18+P0
// P0 = 3 * P1 - 8 * P2, computed as (p_sm2 - P2) << 3 + 3 * P1
#define cmsub38_sm2(P0,P1,P2) \
/* First (%r11;%r10;%r9;%r8) = p_sm2 - P2 */ \
movq $0xffffffffffffffff, %r8 ; \
movq %r8, %r10 ; \
subq P2, %r8 ; \
movq $0xffffffff00000000, %r9 ; \
sbbq 0x8+P2, %r9 ; \
sbbq 0x10+P2, %r10 ; \
movq $0xfffffffeffffffff, %r11 ; \
sbbq 0x18+P2, %r11 ; \
/* (%r12;%r11;%r10;%r9;%r8) = (p_sm2 - P2) << 3 */ \
movq %r11, %r12 ; \
shldq $3, %r10, %r11 ; \
shldq $3, %r9, %r10 ; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
shrq $61, %r12 ; \
/* (%rdx;%r11;%r10;%r9;%r8) = 2^256 + 3 * P1 + 8 * (p_sm2 - P2) */ \
movq $3, %rdx ; \
xorl %eax, %eax ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq 0x8+P1, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq 0x10+P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 0x18+P1, %rax, %rdx ; \
adcxq %rax, %r11 ; \
adoxq %r12, %rdx ; \
adcq $1, %rdx ; \
/* Now the tail for modular reduction from tripling */ \
movq %rdx, %rax ; \
shlq $0x20, %rax ; \
movq %rax, %rcx ; \
subq %rdx, %rax ; \
addq %rdx, %r8 ; \
adcq %rax, %r9 ; \
adcq $0x0, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rdx, %rdx ; \
notq %rdx; \
movq $0xffffffff00000000, %rax ; \
andq %rdx, %rax ; \
movq %rdx, %rcx ; \
btr $0x20, %rcx ; \
addq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
adcq %rdx, %r10 ; \
movq %r10, 0x10+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 0x18+P0
// P0 = 4 * P1 - P2, by direct subtraction of P2,
// since the quotient estimate still works safely
// for initial value > -p_sm2
#define cmsub41_sm2(P0,P1,P2) \
movq 0x18+P1, %r11 ; \
movq %r11, %rdx ; \
movq 0x10+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 0x8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
shrq $62, %rdx ; \
addq $1, %rdx ; \
subq P2, %r8 ; \
sbbq 0x8+P2, %r9 ; \
sbbq 0x10+P2, %r10 ; \
sbbq 0x18+P2, %r11 ; \
sbbq $0, %rdx ; \
/* Now the tail for modular reduction from tripling */ \
movq %rdx, %rax ; \
shlq $0x20, %rax ; \
movq %rax, %rcx ; \
subq %rdx, %rax ; \
addq %rdx, %r8 ; \
adcq %rax, %r9 ; \
adcq $0x0, %r10 ; \
adcq %rcx, %r11 ; \
sbbq %rdx, %rdx ; \
notq %rdx; \
movq $0xffffffff00000000, %rax ; \
andq %rdx, %rax ; \
movq %rdx, %rcx ; \
btr $0x20, %rcx ; \
addq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rax, %r9 ; \
movq %r9, 0x8+P0 ; \
adcq %rdx, %r10 ; \
movq %r10, 0x10+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 0x18+P0
S2N_BN_SYMBOL(sm2_montjdouble):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Main code, just a sequence of basic field operations
// z2 = z^2
// y2 = y^2
montsqr_sm2(z2,z_1)
montsqr_sm2(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
sub_sm2(t2,x_1,z2)
weakadd_sm2(t1,x_1,z2)
montmul_sm2(x2p,t1,t2)
// t1 = y + z
// xy2 = x * y^2
// x4p = x2p^2
add_sm2(t1,y_1,z_1)
montmul_sm2(xy2,x_1,y2)
montsqr_sm2(x4p,x2p)
// t1 = (y + z)^2
montsqr_sm2(t1,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_sm2(d,12,xy2,9,x4p)
sub_sm2(t1,t1,z2)
// y4 = y^4
montsqr_sm2(y4,y2)
// dx2 = d * x2p
montmul_sm2(dx2,d,x2p)
// z_3' = 2 * y * z
sub_sm2(z_3,t1,y2)
// x' = 4 * xy2 - d
cmsub41_sm2(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_sm2(y_3,dx2,y4)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_mod_nsm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_sm2
// Input x[k]; output z[4]
//
// extern void bignum_mod_nsm2(uint64_t z[static 4], uint64_t k,
// const uint64_t *x);
//
// Reduction is modulo the group order of the GM/T 0003-2012 curve SM2.
//
// Standard x86-64 ABI: RDI = z, RSI = k, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = k, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_nsm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_nsm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_nsm2)
.text
#define z %rdi
#define k %rsi
#define x %rcx
#define m0 %r8
#define m1 %r9
#define m2 %r10
#define m3 %r11
#define d %r12
#define n0 %rax
#define n1 %rbx
#define n3 %rdx
#define q %rdx
#define qshort %edx
S2N_BN_SYMBOL(bignum_mod_nsm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save extra registers
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
// If the input is already <= 3 words long, go to a trivial "copy" path
cmpq $4, k
jc Lbignum_mod_nsm2_shortinput
// Otherwise load the top 4 digits (top-down) and reduce k by 4
subq $4, k
movq 24(%rdx,k,8), m3
movq 16(%rdx,k,8), m2
movq 8(%rdx,k,8), m1
movq (%rdx,k,8), m0
// Move x into another register to leave %rdx free for multiplies and use of n3
movq %rdx, x
// Reduce the top 4 digits mod n_sm2 (a conditional subtraction of n_sm2)
movq $0xac440bf6c62abedd, n0
movq $0x8dfc2094de39fad4, n1
movq $0x0000000100000000, n3
addq n0, m0
adcq n1, m1
adcq $0, m2
adcq n3, m3
sbbq d, d
notq d
andq d, n0
andq d, n1
andq d, n3
subq n0, m0
sbbq n1, m1
sbbq $0, m2
sbbq n3, m3
// Now do (k-4) iterations of 5->4 word modular reduction
testq k, k
jz Lbignum_mod_nsm2_writeback
Lbignum_mod_nsm2_loop:
// Writing the input, with the new zeroth digit implicitly appended, as
// m = 2^256 * m3 + 2^192 * m2 + t, our intended quotient approximation is
// min((m3 * (1 + 2^32 + 2^64) + m2 + 2^64) >> 64, 2^64 - 1)
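// (Rationale: 2^256 - n_sm2 is slightly more than 2^224, so
// 2^256 / n_sm2 ~= 1 + 2^-32 + 2^-64 and hence
// m / n_sm2 ~= (m3 + m2 * 2^-64) * (1 + 2^-32 + 2^-64); scaling by 2^64
// and truncating gives the expression above, with the extra 2^64 acting
// as a round-up by one.)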
movq m2, d
movl $1, qshort
addq m3, d
adcq m3, q
shrq $32, d
addq m3, d
shrq $32, d
addq d, q
sbbq $0, q
// Load the next digit so current m to reduce = [m3;m2;m1;m0;d]
movq -8(x,k,8), d
// Now form [m3;m2;m1;m0;d] = m - q * n_sm2
subq q, m3
movq $0xac440bf6c62abedd, n0
mulxq n0, n0, n1
addq n0, d
adcq n1, m0
movq $0x8dfc2094de39fad4, n0
mulxq n0, n0, n1
adcq $0, n1
addq n0, m0
adcq n1, m1
movq $0x0000000100000000, n0
mulxq n0, n0, n1
adcq n0, m2
adcq n1, m3
// Now our top word m3 is either zero or all 1s. Use it for a masked
// addition of n_sm2, which we can do as a *subtraction* of
// 2^256 - n_sm2 from the low four digits [m2;m1;m0;d]
movq $0xac440bf6c62abedd, n0
andq m3, n0
movq $0x8dfc2094de39fad4, n1
andq m3, n1
movq $0x0000000100000000, n3
andq m3, n3
subq n0, d
sbbq n1, m0
sbbq $0, m1
sbbq n3, m2
// Now shuffle registers up and loop
movq m2, m3
movq m1, m2
movq m0, m1
movq d, m0
decq k
jnz Lbignum_mod_nsm2_loop
// Write back
Lbignum_mod_nsm2_writeback:
movq m0, (z)
movq m1, 8(z)
movq m2, 16(z)
movq m3, 24(z)
// Restore registers and return
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_nsm2)
Lbignum_mod_nsm2_shortinput:
xorq m0, m0
xorq m1, m1
xorq m2, m2
xorq m3, m3
testq k, k
jz Lbignum_mod_nsm2_writeback
movq (%rdx), m0
decq k
jz Lbignum_mod_nsm2_writeback
movq 8(%rdx), m1
decq k
jz Lbignum_mod_nsm2_writeback
movq 16(%rdx), m2
jmp Lbignum_mod_nsm2_writeback
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_montmul_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^256) mod p_sm2
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_montmul_sm2(uint64_t z[static 4], const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Does z := (2^{-256} * x * y) mod p_sm2, assuming that the inputs x and y
// satisfy x * y <= 2^256 * p_sm2 (in particular this is true if we are in
// the "usual" case x < p_sm2 and y < p_sm2).
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_sm2)
.text
#define z %rdi
#define x %rsi
// We move the y argument here so we can use %rdx for multipliers
#define y %rcx
// Use this fairly consistently for a zero
#define zero %rbp
#define zeroe %ebp
// mulpadd(high,low,m) adds %rdx * m to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rbx as temporaries.
#define mulpadd(high,low,m) \
mulxq m, %rax, %rbx ; \
adcxq %rax, low ; \
adoxq %rbx, high
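// (adcxq consumes and produces only CF while adoxq uses only OF, so the two
// additions above maintain two independent carry chains in a single pass.)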
// mulpade(high,low,m) adds %rdx * m to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax as a temporary, assuming high created from scratch
// and that zero has value zero.
#define mulpade(high,low,m) \
mulxq m, %rax, high ; \
adcxq %rax, low ; \
adoxq zero, high
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d0;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using %rax, %rcx, %rdx and %rbx
// as temporaries.
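// Since p_sm2 == -1 (mod 2^64), the Montgomery multiplier for one step is
// just the low digit d0 itself: the step in effect adds d0 * p_sm2, which
// zeroes the bottom digit, and the rotated register assignment then
// performs the divide by 2^64.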
// ---------------------------------------------------------------------------
#define montreds(d3,d2,d1,d0) \
movq d0, %rax ; \
shlq $32, %rax ; \
movq d0, %rcx ; \
shrq $32, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq d0, %rax ; \
sbbq $0, %rcx ; \
subq %rax, d1 ; \
sbbq %rcx, d2 ; \
sbbq %rdx, d3 ; \
sbbq %rbx, d0
S2N_BN_SYMBOL(bignum_montmul_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Copy y into a safe register to start with
movq %rdx, y
// Zero a register, which also makes sure we don't get a fake carry-in
xorl zeroe, zeroe
// Do the zeroth row, which is a bit different
movq (y), %rdx
mulxq (x), %r8, %r9
mulxq 8(x), %rax, %r10
addq %rax, %r9
mulxq 16(x), %rax, %r11
adcq %rax, %r10
mulxq 24(x), %rax, %r12
adcq %rax, %r11
adcq zero, %r12
// Add row 1
xorl zeroe, zeroe
movq 8(y), %rdx
mulpadd(%r10,%r9,(x))
mulpadd(%r11,%r10,8(x))
mulpadd(%r12,%r11,16(x))
mulpade(%r13,%r12,24(x))
adcxq zero, %r13
// Add row 2
xorl zeroe, zeroe
movq 16(y), %rdx
mulpadd(%r11,%r10,(x))
mulpadd(%r12,%r11,8(x))
mulpadd(%r13,%r12,16(x))
mulpade(%r14,%r13,24(x))
adcxq zero, %r14
// Add row 3
xorl zeroe, zeroe
movq 24(y), %rdx
mulpadd(%r12,%r11,(x))
mulpadd(%r13,%r12,8(x))
mulpadd(%r14,%r13,16(x))
mulpade(%r15,%r14,24(x))
adcxq zero, %r15
// Multiplication complete. Perform 4 Montgomery steps to rotate the lower half
montreds(%r11,%r10,%r9,%r8)
montreds(%r8,%r11,%r10,%r9)
montreds(%r9,%r8,%r11,%r10)
montreds(%r10,%r9,%r8,%r11)
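// (Composing the four steps turns the low half L of the double-length
// product into (L + m * p_sm2) / 2^256 for some m < 2^256; adding the high
// half H then gives (x * y + m * p_sm2) / 2^256, the Montgomery result up
// to the single conditional subtraction below.)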
// Add high and low parts, catching carry in %rax
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
// Load [%r8;%r11;%rbp;%rdx;%rcx] = 2^320 - p_sm2 then do
// [%r8;%r11;%rbp;%rdx;%rcx] = [%rax;%r15;%r14;%r13;%r12] + (2^320 - p_sm2)
movl $1, %ecx
movl $0x00000000FFFFFFFF, %edx
xorl %ebp, %ebp
addq %r12, %rcx
leaq 1(%rdx), %r11
adcq %r13, %rdx
leaq -1(%rbp), %r8
adcq %r14, %rbp
adcq %r15, %r11
adcq %rax, %r8
// Now carry is set if r + (2^320 - p_sm2) >= 2^320, i.e. r >= p_sm2
// where r is the pre-reduced form. So conditionally select the
// output accordingly.
cmovcq %rcx, %r12
cmovcq %rdx, %r13
cmovcq %rbp, %r14
cmovcq %r11, %r15
// Write back reduced value
movq %r12, (z)
movq %r13, 8(z)
movq %r14, 16(z)
movq %r15, 24(z)
// Restore saved registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montmul_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_inv_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Modular inverse modulo p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1
// Input x[4]; output z[4]
//
// extern void bignum_inv_sm2(uint64_t z[static 4],const uint64_t x[static 4]);
//
// If the 4-digit input x is coprime to p_sm2, i.e. is not divisible
// by it, returns z < p_sm2 such that x * z == 1 (mod p_sm2). Note that
// x does not need to be reduced modulo p_sm2, but the output always is.
// If the input is divisible (i.e. is 0 or p_sm2), then there can be no
// modular inverse and z = 0 is returned.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_inv_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_sm2)
.text
// Size in bytes of a 64-bit word
#define N 8
// Pointer-offset pairs for temporaries on stack
#define f 0(%rsp)
#define g (5*N)(%rsp)
#define u (10*N)(%rsp)
#define v (15*N)(%rsp)
#define tmp (20*N)(%rsp)
#define tmp2 (21*N)(%rsp)
#define i (22*N)(%rsp)
#define d (23*N)(%rsp)
#define mat (24*N)(%rsp)
// Backup for the input pointer
#define res (28*N)(%rsp)
// Total size to reserve on the stack
#define NSPACE 30*N
// Syntactic variants to make x86_att version simpler to generate
#define F 0
#define G (5*N)
#define U (10*N)
#define V (15*N)
#define MAT (24*N)
#define ff (%rsp)
#define gg (5*N)(%rsp)
// ---------------------------------------------------------------------------
// Core signed almost-Montgomery reduction macro from u[4..0] to u[3..0].
// ---------------------------------------------------------------------------
#define amontred(P) \
/* We only know the input is -2^316 < x < 2^316. To do traditional */ \
/* unsigned Montgomery reduction, start by adding 2^61 * p_sm2. */ \
movq $0xe000000000000000, %r8 ; \
addq P, %r8 ; \
movq $0x1fffffffffffffff, %r9 ; \
adcq 8+P, %r9 ; \
movq $0xffffffffe0000000, %r10 ; \
adcq 16+P, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
adcq 24+P, %r11 ; \
movq $0x1fffffffdfffffff, %r12 ; \
adcq 32+P, %r12 ; \
/* Let [%rcx;%rbx] = 2^32 * d0 and [%rdx;%rax] = (2^32-1) * d0 */ \
movq %r8, %rbx ; \
movq %r8, %rcx ; \
shrq $32, %rcx ; \
shlq $32, %rbx ; \
movl $0xffffffff, %eax ; \
mulq %r8; \
/* Now [%r12;%r11;%r10;%r9] := [%r8;%r11;%r10;%r9] - [%rcx;%rbx;%rdx;%rax] */ \
subq %rax, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
sbbq %rcx, %r8 ; \
addq %r8, %r12 ; \
/* Now capture carry and subtract p_sm2 if set (almost-Montgomery) */ \
sbbq %rax, %rax ; \
movl $0xffffffff, %ebx ; \
notq %rbx; \
andq %rax, %rbx ; \
movq %rax, %rdx ; \
btr $32, %rdx ; \
subq %rax, %r9 ; \
movq %r9, P ; \
sbbq %rbx, %r10 ; \
movq %r10, 8+P ; \
sbbq %rax, %r11 ; \
movq %r11, 16+P ; \
sbbq %rdx, %r12 ; \
movq %r12, 24+P
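// (Bound check: p_sm2 > 2^255, so 2^61 * p_sm2 > 2^316 and the sum formed
// above is nonnegative for any |x| < 2^316, while staying below 2^318 and
// hence within the five digits used; adding a multiple of p_sm2 leaves the
// residue class unchanged.)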
// Very similar to a subroutine call to the s2n-bignum word_divstep59.
// But different in register usage and returning the final matrix as
//
// [ %r8 %r10]
// [ %r12 %r14]
//
// and also returning the matrix still negated (which doesn't matter)
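// (Each repeated block below is one signed "divstep" on the low bits of f
// and g, in the style of the Bernstein-Yang safegcd algorithm; 59 steps are
// batched, with the net effect recorded as the 2x2 integer matrix that is
// then applied to the full-width f, g, u and v.)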
#define divstep59(din,fin,gin) \
movq din, %rsi ; \
movq fin, %rdx ; \
movq gin, %rcx ; \
movq %rdx, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq $0xfffffffffffffffe, %rax ; \
xorl %ebp, %ebp ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %rdx ; \
leaq (%rcx,%rax), %rdi ; \
shlq $0x16, %rdx ; \
shlq $0x16, %rdi ; \
sarq $0x2b, %rdx ; \
sarq $0x2b, %rdi ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %rbx ; \
leaq (%rcx,%rax), %rcx ; \
sarq $0x2a, %rbx ; \
sarq $0x2a, %rcx ; \
movq %rdx, MAT(%rsp) ; \
movq %rbx, MAT+0x8(%rsp) ; \
movq %rdi, MAT+0x10(%rsp) ; \
movq %rcx, MAT+0x18(%rsp) ; \
movq fin, %r12 ; \
imulq %r12, %rdi ; \
imulq %rdx, %r12 ; \
movq gin, %r13 ; \
imulq %r13, %rbx ; \
imulq %rcx, %r13 ; \
addq %rbx, %r12 ; \
addq %rdi, %r13 ; \
sarq $0x14, %r12 ; \
sarq $0x14, %r13 ; \
movq %r12, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
movq %r13, %rcx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq $0xfffffffffffffffe, %rax ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %r8 ; \
leaq (%rcx,%rax), %r10 ; \
shlq $0x16, %r8 ; \
shlq $0x16, %r10 ; \
sarq $0x2b, %r8 ; \
sarq $0x2b, %r10 ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %r15 ; \
leaq (%rcx,%rax), %r11 ; \
sarq $0x2a, %r15 ; \
sarq $0x2a, %r11 ; \
movq %r13, %rbx ; \
movq %r12, %rcx ; \
imulq %r8, %r12 ; \
imulq %r15, %rbx ; \
addq %rbx, %r12 ; \
imulq %r11, %r13 ; \
imulq %r10, %rcx ; \
addq %rcx, %r13 ; \
sarq $0x14, %r12 ; \
sarq $0x14, %r13 ; \
movq %r12, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
movq %r13, %rcx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq MAT(%rsp), %rax ; \
imulq %r8, %rax ; \
movq MAT+0x10(%rsp), %rdx ; \
imulq %r15, %rdx ; \
imulq MAT+0x8(%rsp), %r8 ; \
imulq MAT+0x18(%rsp), %r15 ; \
addq %r8, %r15 ; \
leaq (%rax,%rdx), %r9 ; \
movq MAT(%rsp), %rax ; \
imulq %r10, %rax ; \
movq MAT+0x10(%rsp), %rdx ; \
imulq %r11, %rdx ; \
imulq MAT+0x8(%rsp), %r10 ; \
imulq MAT+0x18(%rsp), %r11 ; \
addq %r10, %r11 ; \
leaq (%rax,%rdx), %r13 ; \
movq $0xfffffffffffffffe, %rax ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %r8 ; \
leaq (%rcx,%rax), %r12 ; \
shlq $0x15, %r8 ; \
shlq $0x15, %r12 ; \
sarq $0x2b, %r8 ; \
sarq $0x2b, %r12 ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %r10 ; \
leaq (%rcx,%rax), %r14 ; \
sarq $0x2b, %r10 ; \
sarq $0x2b, %r14 ; \
movq %r9, %rax ; \
imulq %r8, %rax ; \
movq %r13, %rdx ; \
imulq %r10, %rdx ; \
imulq %r15, %r8 ; \
imulq %r11, %r10 ; \
addq %r8, %r10 ; \
leaq (%rax,%rdx), %r8 ; \
movq %r9, %rax ; \
imulq %r12, %rax ; \
movq %r13, %rdx ; \
imulq %r14, %rdx ; \
imulq %r15, %r12 ; \
imulq %r11, %r14 ; \
addq %r12, %r14 ; \
leaq (%rax,%rdx), %r12
S2N_BN_SYMBOL(bignum_inv_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room for temporaries
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Save the result pointer for the end so we can overwrite %rdi later
movq %rdi, res
// Create constant [%rdx;%rcx;%rbx;%rax] = p_sm2 and copy it into the variable f
// including the 5th zero digit
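// For reference, p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1, whose 64-bit words
// from the top down are [0xfffffffeffffffff; 0xffffffffffffffff;
// 0xffffffff00000000; 0xffffffffffffffff].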
xorl %ebp, %ebp
leaq -1(%rbp), %rax
movl $0x00000000ffffffff, %ebx
notq %rbx
movq %rax, %rcx
movq %rax, %rdx
btr $32, %rdx
movq %rax, F(%rsp)
movq %rbx, F+8(%rsp)
movq %rcx, F+16(%rsp)
movq %rdx, F+24(%rsp)
movq %rbp, F+32(%rsp)
// Now reduce the input modulo p_sm2, first negating the constant to get
// [%rdx;%rcx;%rbx;%rax] = 2^256 - p_sm2, adding it to x and hence getting
// the comparison x < p_sm2 <=> (2^256 - p_sm2) + x < 2^256 and choosing
// g accordingly.
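// Note that 2^256 - p_sm2 = 2^224 + 2^96 - 2^64 + 1, i.e. the words
// [0x0000000100000000; 0; 0x00000000ffffffff; 1], which the negations
// below produce from the p_sm2 words already in registers.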
movq (%rsi), %r8
movq 8(%rsi), %r9
movq 16(%rsi), %r10
movq 24(%rsi), %r11
movl $1, %eax
notq %rbx
xorl %ecx, %ecx
notq %rdx
addq %r8, %rax
adcq %r9, %rbx
adcq %r10, %rcx
adcq %r11, %rdx
cmovncq %r8, %rax
cmovncq %r9, %rbx
cmovncq %r10, %rcx
cmovncq %r11, %rdx
movq %rax, G(%rsp)
movq %rbx, G+8(%rsp)
movq %rcx, G+16(%rsp)
movq %rdx, G+24(%rsp)
xorl %eax, %eax
movq %rax, G+32(%rsp)
// Also maintain reduced < 2^256 vector [u,v] such that
// [f,g] == x * 2^{5*i-50} * [u,v] (mod p_sm2)
// starting with [p_sm2,x] == x * 2^{5*0-50} * [0,2^50] (mod p_sm2)
// The weird-looking 5*i modifications come in because we are doing
// 64-bit word-sized Montgomery reductions at each stage, which is
// 5 bits more than the 59-bit requirement to keep things stable.
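// Hence the initialization below: u = 0 and v = 2^50 = 0x0004000000000000.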
xorl %eax, %eax
movq %rax, U(%rsp)
movq %rax, U+8(%rsp)
movq %rax, U+16(%rsp)
movq %rax, U+24(%rsp)
movq $0x0004000000000000, %rcx
movq %rcx, V(%rsp)
movq %rax, V+8(%rsp)
movq %rax, V+16(%rsp)
movq %rax, V+24(%rsp)
// Start of main loop. We jump into the middle so that the divstep
// portion is common to the special tenth iteration after a uniform
// first 9.
movq $10, i
movq $1, d
jmp Lbignum_inv_sm2_midloop
Lbignum_inv_sm2_loop:
// Separate out the matrix into sign-magnitude pairs
movq %r8, %r9
sarq $63, %r9
xorq %r9, %r8
subq %r9, %r8
movq %r10, %r11
sarq $63, %r11
xorq %r11, %r10
subq %r11, %r10
movq %r12, %r13
sarq $63, %r13
xorq %r13, %r12
subq %r13, %r12
movq %r14, %r15
sarq $63, %r15
xorq %r15, %r14
subq %r15, %r14
// Adjust the initial values to allow for complement instead of negation
// This initial offset is the same for [f,g] and [u,v] compositions.
// Save it in temporary storage for the [u,v] part and do [f,g] first.
movq %r8, %rax
andq %r9, %rax
movq %r10, %rdi
andq %r11, %rdi
addq %rax, %rdi
movq %rdi, tmp
movq %r12, %rax
andq %r13, %rax
movq %r14, %rsi
andq %r15, %rsi
addq %rax, %rsi
movq %rsi, tmp2
// Now the computation of the updated f and g values. This maintains a
// 2-word carry between stages so we can conveniently insert the shift
// right by 59 before storing back, without overwriting digits of the
// old f and g values that we still need.
//
// Digit 0 of [f,g]
xorl %ebx, %ebx
movq F(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rdi
adcq %rdx, %rbx
movq G(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rdi
adcq %rdx, %rbx
xorl %ebp, %ebp
movq F(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rsi
adcq %rdx, %rbp
movq G(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
// Digit 1 of [f,g]
xorl %ecx, %ecx
movq F+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq G+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
shrdq $59, %rbx, %rdi
movq %rdi, F(%rsp)
xorl %edi, %edi
movq F+N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rbp
adcq %rdx, %rdi
movq G+N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rdi
shrdq $59, %rbp, %rsi
movq %rsi, G(%rsp)
// Digit 2 of [f,g]
xorl %esi, %esi
movq F+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rsi
movq G+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rsi
shrdq $59, %rcx, %rbx
movq %rbx, F+N(%rsp)
xorl %ebx, %ebx
movq F+2*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rdi
adcq %rdx, %rbx
movq G+2*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rdi
adcq %rdx, %rbx
shrdq $59, %rdi, %rbp
movq %rbp, G+N(%rsp)
// Digits 3 and 4 of [f,g]
movq F+3*N(%rsp), %rax
xorq %r9, %rax
movq F+4*N(%rsp), %rbp
xorq %r9, %rbp
andq %r8, %rbp
negq %rbp
mulq %r8
addq %rax, %rsi
adcq %rdx, %rbp
movq G+3*N(%rsp), %rax
xorq %r11, %rax
movq G+4*N(%rsp), %rdx
xorq %r11, %rdx
andq %r10, %rdx
subq %rdx, %rbp
mulq %r10
addq %rax, %rsi
adcq %rdx, %rbp
shrdq $59, %rsi, %rcx
movq %rcx, F+2*N(%rsp)
shrdq $59, %rbp, %rsi
sarq $59, %rbp
movq F+3*N(%rsp), %rax
movq %rsi, F+3*N(%rsp)
movq F+4*N(%rsp), %rsi
movq %rbp, F+4*N(%rsp)
xorq %r13, %rax
xorq %r13, %rsi
andq %r12, %rsi
negq %rsi
mulq %r12
addq %rax, %rbx
adcq %rdx, %rsi
movq G+3*N(%rsp), %rax
xorq %r15, %rax
movq G+4*N(%rsp), %rdx
xorq %r15, %rdx
andq %r14, %rdx
subq %rdx, %rsi
mulq %r14
addq %rax, %rbx
adcq %rdx, %rsi
shrdq $59, %rbx, %rdi
movq %rdi, G+2*N(%rsp)
shrdq $59, %rsi, %rbx
movq %rbx, G+3*N(%rsp)
sarq $59, %rsi
movq %rsi, G+4*N(%rsp)
// Get the initial carries back from storage and do the [u,v] accumulation
movq tmp, %rbx
movq tmp2, %rbp
// Digit 0 of [u,v]
xorl %ecx, %ecx
movq U(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V(%rsp)
// Digit 1 of [u,v]
xorl %ebx, %ebx
movq U+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rbx
xorl %ebp, %ebp
movq U+N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rcx, U+N(%rsp)
addq %rax, %rsi
adcq %rdx, %rbp
movq V+N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
movq %rsi, V+N(%rsp)
// Digit 2 of [u,v]
xorl %ecx, %ecx
movq U+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U+2*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U+2*N(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V+2*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V+2*N(%rsp)
// Digits 3 and 4 of u (top is unsigned)
movq U+3*N(%rsp), %rax
xorq %r9, %rax
movq %r9, %rbx
andq %r8, %rbx
negq %rbx
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+3*N(%rsp), %rax
xorq %r11, %rax
movq %r11, %rdx
andq %r10, %rdx
subq %rdx, %rbx
mulq %r10
addq %rax, %rcx
adcq %rbx, %rdx
// Preload for last use of old u digit 3
movq U+3*N(%rsp), %rax
movq %rcx, U+3*N(%rsp)
movq %rdx, U+4*N(%rsp)
// Digits 3 and 4 of v (top is unsigned)
xorq %r13, %rax
movq %r13, %rcx
andq %r12, %rcx
negq %rcx
mulq %r12
addq %rax, %rsi
adcq %rdx, %rcx
movq V+3*N(%rsp), %rax
xorq %r15, %rax
movq %r15, %rdx
andq %r14, %rdx
subq %rdx, %rcx
mulq %r14
addq %rax, %rsi
adcq %rcx, %rdx
movq %rsi, V+3*N(%rsp)
movq %rdx, V+4*N(%rsp)
// Montgomery reduction of u
amontred(u)
// Montgomery reduction of v
amontred(v)
Lbignum_inv_sm2_midloop:
divstep59(d,ff,gg)
movq %rsi, d
// Next iteration
decq i
jnz Lbignum_inv_sm2_loop
// The 10th and last iteration does not need anything except the
// u value and the sign of f; the latter can be obtained from the
// lowest word of f. So it's done differently from the main loop.
// Find the sign of the new f. For this we just need one digit
// since we know (for in-scope cases) that f is either +1 or -1.
// We don't explicitly shift right by 59 either, but looking at
// bit 63 (or any bit >= 60) of the unshifted result is enough
// to distinguish -1 from +1; this is then made into a mask.
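// The resulting %rax is 0 when f = +1 and all 1s when f = -1, and is
// XORed into the sign masks of the matrix entries below.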
movq F(%rsp), %rax
movq G(%rsp), %rcx
imulq %r8, %rax
imulq %r10, %rcx
addq %rcx, %rax
sarq $63, %rax
// Now separate out the matrix into sign-magnitude pairs
// and adjust each one based on the sign of f.
//
// Note that at this point we expect |f|=1 and we got its
// sign above, so then since [f,0] == x * [u,v] (mod p_sm2)
// we want to flip the sign of u according to that of f.
movq %r8, %r9
sarq $63, %r9
xorq %r9, %r8
subq %r9, %r8
xorq %rax, %r9
movq %r10, %r11
sarq $63, %r11
xorq %r11, %r10
subq %r11, %r10
xorq %rax, %r11
movq %r12, %r13
sarq $63, %r13
xorq %r13, %r12
subq %r13, %r12
xorq %rax, %r13
movq %r14, %r15
sarq $63, %r15
xorq %r15, %r14
subq %r15, %r14
xorq %rax, %r15
// Adjust the initial value to allow for complement instead of negation
movq %r8, %rax
andq %r9, %rax
movq %r10, %r12
andq %r11, %r12
addq %rax, %r12
// Digit 0 of [u]
xorl %r13d, %r13d
movq U(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r12
adcq %rdx, %r13
movq V(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r12
adcq %rdx, %r13
// Digit 1 of [u]
xorl %r14d, %r14d
movq U+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r13
adcq %rdx, %r14
movq V+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r13
adcq %rdx, %r14
// Digit 2 of [u]
xorl %r15d, %r15d
movq U+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r14
adcq %rdx, %r15
movq V+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r14
adcq %rdx, %r15
// Digits 3 and 4 of u (top is unsigned)
movq U+3*N(%rsp), %rax
xorq %r9, %rax
andq %r8, %r9
negq %r9
mulq %r8
addq %rax, %r15
adcq %rdx, %r9
movq V+3*N(%rsp), %rax
xorq %r11, %rax
movq %r11, %rdx
andq %r10, %rdx
subq %rdx, %r9
mulq %r10
addq %rax, %r15
adcq %rdx, %r9
// Store back and Montgomery reduce u
movq %r12, U(%rsp)
movq %r13, U+N(%rsp)
movq %r14, U+2*N(%rsp)
movq %r15, U+3*N(%rsp)
movq %r9, U+4*N(%rsp)
amontred(u)
// Perform final strict reduction mod p_sm2 and copy to output
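// As in the initial reduction of the input, this compares with p_sm2 by
// adding 2^256 - p_sm2 and keeping the sum only if the addition carries.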
movq U(%rsp), %r8
movq U+N(%rsp), %r9
movq U+2*N(%rsp), %r10
movq U+3*N(%rsp), %r11
movl $1, %eax
movl $0x00000000ffffffff, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
bts $32, %rdx
addq %r8, %rax
adcq %r9, %rbx
adcq %r10, %rcx
adcq %r11, %rdx
cmovncq %r8, %rax
cmovncq %r9, %rbx
cmovncq %r10, %rcx
cmovncq %r11, %rdx
movq res, %rdi
movq %rax, (%rdi)
movq %rbx, N(%rdi)
movq %rcx, 2*N(%rdi)
movq %rdx, 3*N(%rdi)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_inv_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_mod_nsm2_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_sm2
// Input x[k]; output z[4]
//
// extern void bignum_mod_nsm2_alt(uint64_t z[static 4], uint64_t k,
// const uint64_t *x);
//
// Reduction is modulo the group order of the GM/T 0003-2012 curve SM2.
//
// Standard x86-64 ABI: RDI = z, RSI = k, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = k, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_nsm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_nsm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_nsm2_alt)
.text
#define z %rdi
#define k %rsi
#define x %rcx
#define m0 %r8
#define m1 %r9
#define m2 %r10
#define m3 %r11
#define d %r12
#define n0 %rax
#define n1 %rbx
#define n3 %rdx
#define q %rbx
#define qshort %ebx
S2N_BN_SYMBOL(bignum_mod_nsm2_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save extra registers
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
// If the input is already <= 3 words long, go to a trivial "copy" path
cmpq $4, k
jc Lbignum_mod_nsm2_alt_shortinput
// Otherwise load the top 4 digits (top-down) and reduce k by 4
subq $4, k
movq 24(%rdx,k,8), m3
movq 16(%rdx,k,8), m2
movq 8(%rdx,k,8), m1
movq (%rdx,k,8), m0
// Move x into another register to leave %rdx free for multiplies and use of n3
movq %rdx, x
// Reduce the top 4 digits mod n_sm2 (a conditional subtraction of n_sm2)
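// The constants below are 2^256 - n_sm2 = [0x0000000100000000; 0;
// 0x8dfc2094de39fad4; 0xac440bf6c62abedd]; adding them carries exactly
// when m >= n_sm2, in which case the low 256 bits are already m - n_sm2,
// and otherwise the masked subtraction undoes the addition.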
movq $0xac440bf6c62abedd, n0
movq $0x8dfc2094de39fad4, n1
movq $0x0000000100000000, n3
addq n0, m0
adcq n1, m1
adcq $0, m2
adcq n3, m3
sbbq d, d
notq d
andq d, n0
andq d, n1
andq d, n3
subq n0, m0
sbbq n1, m1
sbbq $0, m2
sbbq n3, m3
// Now do (k-4) iterations of 5->4 word modular reduction
testq k, k
jz Lbignum_mod_nsm2_alt_writeback
Lbignum_mod_nsm2_alt_loop:
// Writing the input, with the new zeroth digit implicitly appended, as
// m = 2^256 * m3 + 2^192 * m2 + t, our intended quotient approximation is
// MIN(((m3 * (1 + 2^32 + 2^64) + m2 + 2^64) >> 64), 2^64 - 1)
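// The multiplier reflects 2^256/n_sm2 ~ 1 + 2^-32 + 2^-64, since
// n_sm2 = 2^256 - 2^224 - e with 0 <= e < 2^128; the extra 2^64 rounds the
// estimate up so that (as the correction below assumes) q is never too
// small and at most one too big.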
movq m2, d
movl $1, qshort
addq m3, d
adcq m3, q
shrq $32, d
addq m3, d
shrq $32, d
addq d, q
sbbq $0, q
// Load the next digit so current m to reduce = [m3;m2;m1;m0;d]
movq -8(x,k,8), d
// Now form [m3;m2;m1;m0;d] = m - q * n_sm2
subq q, m3
movq $0xac440bf6c62abedd, %rax
mulq q
addq %rax, d
adcq %rdx, m0
adcq $0, m1
adcq $0, m2
adcq $0, m3
movq $0x8dfc2094de39fad4, %rax
mulq q
addq %rax, m0
adcq %rdx, m1
adcq $0, m2
adcq $0, m3
movq $0x0000000100000000, %rax
mulq q
addq %rax, m2
adcq %rdx, m3
// Now our top word m3 is either zero or all 1s. Use it for a masked
// addition of n_sm2, which we can do by a *subtraction* of
// 2^256 - n_sm2 from our portion
movq $0xac440bf6c62abedd, n0
andq m3, n0
movq $0x8dfc2094de39fad4, n1
andq m3, n1
movq $0x0000000100000000, n3
andq m3, n3
subq n0, d
sbbq n1, m0
sbbq $0, m1
sbbq n3, m2
// Now shuffle registers up and loop
movq m2, m3
movq m1, m2
movq m0, m1
movq d, m0
decq k
jnz Lbignum_mod_nsm2_alt_loop
// Write back
Lbignum_mod_nsm2_alt_writeback:
movq m0, (z)
movq m1, 8(z)
movq m2, 16(z)
movq m3, 24(z)
// Restore registers and return
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_nsm2_alt)
Lbignum_mod_nsm2_alt_shortinput:
xorq m0, m0
xorq m1, m1
xorq m2, m2
xorq m3, m3
testq k, k
jz Lbignum_mod_nsm2_alt_writeback
movq (%rdx), m0
decq k
jz Lbignum_mod_nsm2_alt_writeback
movq 8(%rdx), m1
decq k
jz Lbignum_mod_nsm2_alt_writeback
movq 16(%rdx), m2
jmp Lbignum_mod_nsm2_alt_writeback
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates
//
// extern void sm2_montjadd(uint64_t p3[static 12], const uint64_t p1[static 12],
// const uint64_t p2[static 12]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjadd)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// which needs to be set up explicitly before use.
// By design, none of the code macros modify any of
// these, so we maintain the assignments throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define z_2 (2*NUMSIZE)(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define x1a (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define z2sq (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define y1a (NUMSIZE*6)(%rsp)
#define NSPACE NUMSIZE*7
// Corresponds to bignum_montmul_sm2 except for registers
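// Note that p_sm2 == -1 (mod 2^64), hence -p_sm2^-1 == 1 (mod 2^64), so each
// Montgomery reduction step below uses the current low word directly as the
// quotient digit, with no multiplication by a precomputed inverse.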
#define montmul_sm2(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rax, %r10 ; \
addq %rax, %r9 ; \
mulxq 0x10+P1, %rax, %r11 ; \
adcq %rax, %r10 ; \
mulxq 0x18+P1, %rax, %r12 ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x8+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rcx, %r13 ; \
xorl %ecx, %ecx ; \
movq 0x10+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq 0x18+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds to bignum_montsqr_sm2 except for registers
#define montsqr_sm2(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
adcxq %rcx, %r13 ; \
adoxq %rcx, %r14 ; \
adcq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rcx, %r15 ; \
adoxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Almost-Montgomery variant, which we use when the result is only ever an
// input to other multiplications whose other argument is fully reduced
// (which is always safe).
#define amontsqr_sm2(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
adcxq %rcx, %r13 ; \
adoxq %rcx, %r14 ; \
adcq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rcx, %r15 ; \
adoxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
sbbq %rax, %rax ; \
movq $0xffffffff00000000, %rbx ; \
movq %rax, %rcx ; \
andq %rax, %rbx ; \
btr $32, %rcx ; \
subq %rax, %r12 ; \
sbbq %rbx, %r13 ; \
sbbq %rax, %r14 ; \
sbbq %rcx, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds exactly to bignum_sub_sm2
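// The trailing masked addition adds back p_sm2 = [0xfffffffeffffffff; -1;
// 0xffffffff00000000; -1] exactly when the initial subtraction borrows,
// using the all-1s borrow mask built in %r11.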
#define sub_sm2(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rcx ; \
sbbq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// Additional macros to help with final multiplexing
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
#define czload4(r0,r1,r2,r3,P) \
cmovzq P, r0 ; \
cmovzq 8+P, r1 ; \
cmovzq 16+P, r2 ; \
cmovzq 24+P, r3
#define muxload4(r0,r1,r2,r3,P0,P1,P2) \
movq P0, r0 ; \
cmovbq P1, r0 ; \
cmovnbe P2, r0 ; \
movq 8+P0, r1 ; \
cmovbq 8+P1, r1 ; \
cmovnbe 8+P2, r1 ; \
movq 16+P0, r2 ; \
cmovbq 16+P1, r2 ; \
cmovnbe 16+P2, r2 ; \
movq 24+P0, r3 ; \
cmovbq 24+P1, r3 ; \
cmovnbe 24+P2, r3
S2N_BN_SYMBOL(sm2_montjadd):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input y in %rbp where it lasts throughout the main code.
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
// 12 * multiply + 4 * square + 7 * subtract
amontsqr_sm2(z1sq,z_1)
amontsqr_sm2(z2sq,z_2)
montmul_sm2(y1a,z_2,y_1)
montmul_sm2(y2a,z_1,y_2)
montmul_sm2(x2a,z1sq,x_2)
montmul_sm2(x1a,z2sq,x_1)
montmul_sm2(y2a,z1sq,y2a)
montmul_sm2(y1a,z2sq,y1a)
sub_sm2(xd,x2a,x1a)
sub_sm2(yd,y2a,y1a)
amontsqr_sm2(zz,xd)
montsqr_sm2(ww,yd)
montmul_sm2(zzx1,zz,x1a)
montmul_sm2(zzx2,zz,x2a)
sub_sm2(resx,ww,zzx1)
sub_sm2(t1,zzx2,zzx1)
montmul_sm2(xd,xd,z_1)
sub_sm2(resx,resx,zzx2)
sub_sm2(t2,zzx1,resx)
montmul_sm2(t1,t1,y1a)
montmul_sm2(resz,xd,z_2)
montmul_sm2(t2,yd,t2)
sub_sm2(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0)
// and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0
// and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0)
load4(%r8,%r9,%r10,%r11,z_1)
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
load4(%r12,%r13,%r14,%r15,z_2)
movq %r12, %rbx
movq %r13, %rdx
orq %r14, %rbx
orq %r15, %rdx
orq %rdx, %rbx
negq %rbx
sbbq %rbx, %rbx
cmpq %rax, %rbx
// Multiplex the outputs accordingly, re-using the z's in registers
cmovbq %r8, %r12
cmovbq %r9, %r13
cmovbq %r10, %r14
cmovbq %r11, %r15
czload4(%r12,%r13,%r14,%r15,resz)
muxload4(%rax,%rbx,%rcx,%rdx,resx,x_1,x_2)
muxload4(%r8,%r9,%r10,%r11,resy,y_1,y_2)
// Finally store back the multiplexed values
store4(x_3,%rax,%rbx,%rcx,%rdx)
store4(y_3,%r8,%r9,%r10,%r11)
store4(z_3,%r12,%r13,%r14,%r15)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjadd)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_triple_sm2_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Triple modulo p_sm2, z := (3 * x) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_triple_sm2_alt(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// The input x can be any 4-digit bignum, not necessarily reduced modulo p_sm2,
// and the result is always fully reduced, i.e. z = (3 * x) mod p_sm2.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_sm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_sm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_sm2_alt)
.text
#define z %rdi
#define x %rsi
// Main digits of intermediate results
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
// Quotient estimate = top of product + 1
#define q %rdx
#define h %rdx
// Other temporary variables and their short version
#define a %rax
#define c %rcx
#define d %rdx
#define ashort %eax
#define cshort %ecx
S2N_BN_SYMBOL(bignum_triple_sm2_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// First do the multiplication by 3, getting z = [h; d3; ...; d0]
// but immediately form the quotient estimate q = h + 1
movl $3, cshort
movq (x), a
mulq c
movq a, d0
movq d, d1
movq 8(x), a
xorq d2, d2
mulq c
addq a, d1
adcq d, d2
movq 16(x), a
xorq d3, d3
mulq c
addq a, d2
adcq d, d3
movq 24(x), a
mulq c
addq a, d3
// For this limited range a simple quotient estimate of q = h + 1 works, where
// h = floor(z / 2^256). Then -p_sm2 <= z - q * p_sm2 < p_sm2, so we just need
// to subtract q * p_sm2 and then if that's negative, add back p_sm2.
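// Concretely z < 3 * 2^256 so h <= 2, and since p_sm2 > (3/4) * 2^256 >=
// ((h+1)/(h+2)) * 2^256 we get z < (h+1) * 2^256 <= (h+2) * p_sm2, giving
// the upper bound; the lower bound holds since z >= h * 2^256 >= h * p_sm2.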
adcq $1, q
// Now compute the initial pre-reduced [h;d3;d2;d1;d0] = z - p_sm2 * q
// = z - (2^256 - 2^224 - 2^96 + 2^64 - 1) * q
movq q, a
shlq $32, a
movq a, c
subq q, a
addq q, d0
adcq a, d1
adcq $0, d2
adcq c, d3
sbbq h, h
notq h
// Now our top word h is either zero or all 1s, and we use this to discriminate
// whether a correction is needed because our result is negative, as a bitmask
// Do a masked addition of p_sm2
movq $0xffffffff00000000, a
andq h, a
movq $0xfffffffeffffffff, c
andq h, c
addq h, d0
movq d0, (z)
adcq a, d1
movq d1, 8(z)
adcq h, d2
movq d2, 16(z)
adcq c, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_triple_sm2_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_optneg_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Optionally negate modulo p_sm2, z := (-x) mod p_sm2 (if p nonzero) or
// z := x (if p zero), assuming x reduced
// Inputs p, x[4]; output z[4]
//
// extern void bignum_optneg_sm2(uint64_t z[static 4], uint64_t p,
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = p, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = p, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_sm2)
.text
#define z %rdi
#define q %rsi
#define x %rdx
#define n0 %rax
#define n1 %rcx
#define n2 %r8
#define n3 %r9
S2N_BN_SYMBOL(bignum_optneg_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Adjust q by zeroing it if the input is zero (to avoid giving -0 = p_sm2,
// which is not strictly reduced even though it's correct modulo p_sm2).
// This step is redundant if we know a priori that the input is nonzero, which
// is the case for the y coordinate of points on the SM2 curve, for example.
movq (x), n0
orq 8(x), n0
movq 16(x), n1
orq 24(x), n1
orq n1, n0
negq n0
sbbq n0, n0
andq n0, q
// Turn q into a bitmask, all 1s for q=false, all 0s for q=true
negq q
sbbq q, q
notq q
// Let [n3;n2;n1;n0] = if q then p_sm2 else -1
movq $0xffffffffffffffff, n0
movq $0xffffffff00000000, n1
orq q, n1
movq n0, n2
movq $0xfffffffeffffffff, n3
orq q, n3
// Subtract so [n3;n2;n1;n0] = if q then p_sm2 - x else -1 - x
subq (x), n0
sbbq 8(x), n1
sbbq 16(x), n2
sbbq 24(x), n3
// XOR the words with the bitmask, which in the case q = false has the
// effect of restoring ~(-1 - x) = -(-1 - x) - 1 = 1 + x - 1 = x
// and write back the digits to the output
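// (In the q = true case the mask is zero, so p_sm2 - x is written unchanged.)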
xorq q, n0
movq n0, (z)
xorq q, n1
movq n1, 8(z)
xorq q, n2
movq n2, 16(z)
xorq q, n3
movq n3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_optneg_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_add_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Add modulo p_sm2, z := (x + y) mod p_sm2, assuming x and y reduced
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_add_sm2(uint64_t z[static 4], const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_sm2)
.text
#define z %rdi
#define x %rsi
#define y %rdx
#define d0 %rax
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define n1 %r10
#define n3 %rdx
#define c %r11
#define n1short %r10d
S2N_BN_SYMBOL(bignum_add_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Load and add the two inputs as 2^256 * c + [d3;d2;d1;d0] = x + y
xorq c, c
movq (x), d0
addq (y), d0
movq 8(x), d1
adcq 8(y), d1
movq 16(x), d2
adcq 16(y), d2
movq 24(x), d3
adcq 24(y), d3
adcq c, c
// Now subtract p_sm2 so that 2^256 * c + [d3;d2;d1;d0] = x + y - p_sm2
// The constants n1 and n3 in [n3; -1; n1; -1] = p_sm2 are saved for later
subq $-1, d0
movq $0xffffffff00000000, n1
sbbq n1, d1
sbbq $-1, d2
movq $0xfffffffeffffffff, n3
sbbq n3, d3
// Since by hypothesis x < p_sm2 we know x + y - p_sm2 < 2^256, so the top
// carry c actually gives us a bitmask for x + y - p_sm2 < 0, which we now
// use to make a masked p_sm2' = [n3; c; n1; c] (words 0 and 2 of p_sm2
// are all 1s, so the mask c itself serves for them)
sbbq $0, c
andq c, n1
andq c, n3
// Do the corrective addition and copy to output
addq c, d0
movq d0, (z)
adcq n1, d1
movq d1, 8(z)
adcq c, d2
movq d2, 16(z)
adcq n3, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_add_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
// File: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates
//
// extern void sm2_montjmixadd(uint64_t p3[static 12],
// const uint64_t p1[static 12],
// const uint64_t p2[static 8]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity.
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjmixadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjmixadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjmixadd)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// which needs to be set up explicitly before use.
// By design, none of the code macros modify any of
// these, so we maintain the assignments throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define zp2 (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define NSPACE NUMSIZE*6
// Corresponds to bignum_montmul_sm2 except for registers
#define montmul_sm2(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
mulxq 0x8+P1, %rax, %r10 ; \
addq %rax, %r9 ; \
mulxq 0x10+P1, %rax, %r11 ; \
adcq %rax, %r10 ; \
mulxq 0x18+P1, %rax, %r12 ; \
adcq %rax, %r11 ; \
adcq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x8+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcxq %rcx, %r13 ; \
xorl %ecx, %ecx ; \
movq 0x10+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq 0x18+P2, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
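// In effect this macro computes a fully reduced Montgomery product. As a
// Python sketch of that specification (an illustration, not part of the
// original source; Python 3.8+ for the modular inverse):
//
//     p = 2**256 - 2**224 - 2**96 + 2**64 - 1       # p_sm2
//     def montmul_sm2(a, b):                        # a, b reduced mod p
//         return (a * b * pow(2, -256, p)) % p
//
// The squaring macro below is the corresponding a == b special case.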
// Corresponds to bignum_montsqr_sm2 except for registers
#define montsqr_sm2(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
adcxq %rcx, %r13 ; \
adoxq %rcx, %r14 ; \
adcq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rcx, %r15 ; \
adoxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Almost-Montgomery variant, used when the result only feeds other
// multiplications whose other argument is fully reduced (which is always safe).
#define amontsqr_sm2(P0,P1) \
movq P1, %rdx ; \
mulxq %rdx, %r8, %r15 ; \
mulxq 0x8+P1, %r9, %r10 ; \
mulxq 0x18+P1, %r11, %r12 ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %r13, %r14 ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq 0x18+P1, %rdx ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
adcxq %rcx, %r13 ; \
adoxq %rcx, %r14 ; \
adcq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
adcxq %r9, %r9 ; \
adoxq %r15, %r9 ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r10, %r10 ; \
adoxq %rax, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rdx, %r11 ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rax, %rdx ; \
adcxq %r12, %r12 ; \
adoxq %rax, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rdx, %r13 ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rax, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rax, %r14 ; \
adcxq %rcx, %r15 ; \
adoxq %rcx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
sbbq %rax, %rax ; \
movq $0xffffffff00000000, %rbx ; \
movq %rax, %rcx ; \
andq %rax, %rbx ; \
btr $32, %rcx ; \
subq %rax, %r12 ; \
sbbq %rbx, %r13 ; \
sbbq %rax, %r14 ; \
sbbq %rcx, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds exactly to bignum_sub_sm2
#define sub_sm2(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rcx ; \
sbbq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// Additional macros to help with final multiplexing
#define testzero4(P) \
movq P, %rax ; \
movq 8+P, %rdx ; \
orq 16+P, %rax ; \
orq 24+P, %rdx ; \
orq %rdx, %rax
#define mux4(r0,r1,r2,r3,PNE,PEQ) \
movq PNE, r0 ; \
movq PEQ, %rax ; \
cmovzq %rax, r0 ; \
movq 8+PNE, r1 ; \
movq 8+PEQ, %rax ; \
cmovzq %rax, r1 ; \
movq 16+PNE, r2 ; \
movq 16+PEQ, %rax ; \
cmovzq %rax, r2 ; \
movq 24+PNE, r3 ; \
movq 24+PEQ, %rax ; \
cmovzq %rax, r3
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
S2N_BN_SYMBOL(sm2_montjmixadd):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input pointer p2 in %rbp where it lasts throughout the main code.
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
// 8 * multiply + 3 * square + 7 * subtract
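// An informal reading of the sequence (all arithmetic mod p_sm2, with
// inputs P1 = (x1,y1,z1) and P2 = (x2,y2); these are the standard
// mixed-addition formulas that the operations below implement):
//
//     zp2 = z1^2,  x2a = x2 * z1^2,  y2a = y2 * z1^3
//     xd = x2a - x1,  yd = y2a - y1
//     resx = yd^2 - xd^2 * (x1 + x2a)
//     resy = yd * (xd^2 * x1 - resx) - y1 * xd^3
//     resz = xd * z1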
amontsqr_sm2(zp2,z_1)
montmul_sm2(y2a,z_1,y_2)
montmul_sm2(x2a,zp2,x_2)
montmul_sm2(y2a,zp2,y2a)
sub_sm2(xd,x2a,x_1)
sub_sm2(yd,y2a,y_1)
amontsqr_sm2(zz,xd)
montsqr_sm2(ww,yd)
montmul_sm2(zzx1,zz,x_1)
montmul_sm2(zzx2,zz,x2a)
sub_sm2(resx,ww,zzx1)
sub_sm2(t1,zzx2,zzx1)
montmul_sm2(resz,xd,z_1)
sub_sm2(resx,resx,zzx2)
sub_sm2(t2,zzx1,resx)
montmul_sm2(t1,t1,y_1)
montmul_sm2(t2,yd,t2)
sub_sm2(resy,t2,t1)
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
testzero4(z_1)
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in
// Montgomery form so not the simple constant 1 but rather 2^256 - p_sm2),
// hence giving 0 + p2 = p2 for the final result.
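// Concretely, 1 in Montgomery form is 2^256 mod p_sm2 = 2^256 - p_sm2
// = 2^224 + 2^96 - 2^64 + 1, whose little-endian 64-bit digits are the
// four constants loaded below. A quick Python check (an illustration,
// not part of the original source):
//
//     p = 2**256 - 2**224 - 2**96 + 2**64 - 1
//     r = 2**256 % p
//     assert [(r >> (64*i)) & (2**64 - 1) for i in range(4)] == \
//            [1, 0xffffffff, 0, 0x0000000100000000]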
mux4(%r8,%r9,%r10,%r11,resx,x_2)
mux4(%r12,%r13,%r14,%r15,resy,y_2)
store4(x_3,%r8,%r9,%r10,%r11)
store4(y_3,%r12,%r13,%r14,%r15)
load4(%r8,%r9,%r10,%r11,resz)
movl $1, %eax
cmovzq %rax, %r8
movl $0x00000000ffffffff, %eax
cmovzq %rax, %r9
movl $0, %eax
cmovzq %rax, %r10
movq $0x0000000100000000, %rax
cmovzq %rax, %r11
store4(z_3,%r8,%r9,%r10,%r11)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjmixadd)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_double_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Double modulo p_sm2, z := (2 * x) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_double_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_double_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_sm2)
.text
#define z %rdi
#define x %rsi
#define d0 %rdx
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define n1 %r10
#define n3 %r11
#define c %rax
#define n1short %r10d
S2N_BN_SYMBOL(bignum_double_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the input and double it so that 2^256 * c + [d3;d2;d1;d0] = 2 * x
// Could also consider using shld to decouple carries
xorq c, c
movq (x), d0
addq d0, d0
movq 8(x), d1
adcq d1, d1
movq 16(x), d2
adcq d2, d2
movq 24(x), d3
adcq d3, d3
adcq c, c
// Now subtract p_sm2 to get 2^256 * c + [d3;d2;d1;d0] = 2 * x - p_sm2
// The constants n1 and n3 in [n3; -1; n1; -1] = p_sm2 are saved for later
subq $-1, d0
movq $0xffffffff00000000, n1
sbbq n1, d1
sbbq $-1, d2
movq $0xfffffffeffffffff, n3
sbbq n3, d3
// Since by hypothesis x < p_sm2 we know 2 * x - p_sm2 < 2^256, so the top
// carry c actually gives us a bitmask for 2 * x - p_sm2 < 0, which we
// now use to make a masked p_sm2' = [n3; c; n1; c]
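// In Python terms the whole function computes (a sketch of the intended
// behaviour, assuming the input is reduced as the header states):
//
//     p = 2**256 - 2**224 - 2**96 + 2**64 - 1
//     def double_sm2(x):          # 0 <= x < p
//         d = 2 * x - p           # in [-p, p)
//         return d + p if d < 0 else d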
sbbq $0, c
andq c, n1
andq c, n3
// Do the corrective addition and copy to output
addq c, d0
movq d0, (z)
adcq n1, d1
movq d1, 8(z)
adcq c, d2
movq d2, 16(z)
adcq n3, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_double_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_cmul_sm2_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_sm2, z := (c * x) mod p_sm2, assuming
// x reduced
// Inputs c, x[4]; output z[4]
//
// extern void bignum_cmul_sm2_alt(uint64_t z[static 4], uint64_t c,
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_sm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_sm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_sm2_alt)
.text
#define z %rdi
// Temporarily moved here for initial multiply then thrown away
#define x %rcx
#define m %rsi
// Other variables
#define d %rdx
#define a %rax
#define c %rcx
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
#define h %rsi
#define hshort %esi
// Multiplier again for second stage
#define q %rdx
#define qshort %edx
S2N_BN_SYMBOL(bignum_cmul_sm2_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Shuffle inputs (since we want %rdx for the high parts of products)
movq %rdx, x
// Multiply, accumulating the result as ca = 2^256 * h + [d3;d2;d1;d0]
movq (x), a
mulq m
movq a, d0
movq d, d1
movq 8(x), a
mulq m
xorq d2, d2
addq a, d1
adcq d, d2
movq 16(x), a
mulq m
xorq d3, d3
addq a, d2
adcq d, d3
movq 24(x), a
mulq m
xorl hshort, hshort
addq a, d3
adcq d, h
// Quotient approximation is (h * (1 + 2^32 + 2^64) + d3 + 2^64) >> 64.
// Note that by hypothesis our product is <= (2^64 - 1) * (p_sm2 - 1),
// so there is no need to max this out to avoid wrapping, unlike in the
// more general case of bignum_mod_sm2.
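// Restated as Python (an illustration, not part of the original source):
//
//     q = (h * (1 + 2**32 + 2**64) + d3 + 2**64) >> 64
//
// The subtraction below then relies on -p_sm2 <= ca - q * p_sm2 < p_sm2,
// so the single masked addition of p_sm2 at the end completes the reduction.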
movq d3, a
movl $1, qshort
addq h, a
adcq h, q
shrq $32, a
addq h, a
shrq $32, a
addq a, q
// Now compute the initial pre-reduced [h;d3;d2;d1;d0] = ca - p_sm2 * q
// = ca - (2^256 - 2^224 - 2^96 + 2^64 - 1) * q
movq q, a
movq q, c
shlq $32, a
shrq $32, c
addq a, d3
adcq c, h
subq q, a
sbbq $0, c
subq q, h
addq q, d0
adcq a, d1
adcq c, d2
adcq $0, d3
adcq $0, h
// Now our top word h is either zero or all 1s, and we use this to discriminate
// whether a correction is needed because our result is negative, as a bitmask
// Do a masked addition of p_sm2
movq $0xffffffff00000000, a
andq h, a
movq $0xfffffffeffffffff, c
andq h, c
addq h, d0
movq d0, (z)
adcq a, d1
movq d1, 8(z)
adcq h, d2
movq d2, 16(z)
adcq c, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_sm2_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_mod_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_sm2
// Input x[k]; output z[4]
//
// extern void bignum_mod_sm2(uint64_t z[static 4], uint64_t k, const uint64_t *x);
//
// Standard x86-64 ABI: RDI = z, RSI = k, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = k, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_sm2)
.text
#define z %rdi
#define k %rsi
#define x %rdx
#define m0 %r8
#define m1 %r9
#define m2 %r10
#define m3 %r11
#define d %r12
#define n0 %rax
#define n1 %rbx
#define n3 %rcx
#define q %rcx
#define qshort %ecx
S2N_BN_SYMBOL(bignum_mod_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save extra registers
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
// If the input is already <= 3 words long, go to a trivial "copy" path
cmpq $4, k
jc Lbignum_mod_sm2_shortinput
// Otherwise load the top 4 digits (top-down) and reduce k by 4
subq $4, k
movq 24(x,k,8), m3
movq 16(x,k,8), m2
movq 8(x,k,8), m1
movq (x,k,8), m0
// Load non-trivial digits [n3; -1; n1; -1] = p_sm2 and do a conditional
// subtraction to reduce the four starting digits [m3;m2;m1;m0] modulo p_sm2
subq $-1, m0
movq $0xffffffff00000000, n1
sbbq n1, m1
movq $0xfffffffeffffffff, n3
sbbq $-1, m2
sbbq n3, m3
sbbq n0, n0
andq n0, n1
andq n0, n3
addq n0, m0
adcq n1, m1
adcq n0, m2
adcq n3, m3
// Now do (k-4) iterations of 5->4 word modular reduction
testq k, k
jz Lbignum_mod_sm2_writeback
Lbignum_mod_sm2_loop:
// Writing the input, with the new zeroth digit implicitly appended, as
// z = 2^256 * m3 + 2^192 * m2 + t, our intended quotient approximation is
// MIN ((m3 * (1 + 2^32 + 2^64) + m2 + 2^64) >> 64) (2^64 - 1)
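// Restated as Python (an illustration, not part of the original source):
//
//     q = min((m3 * (1 + 2**32 + 2**64) + m2 + 2**64) >> 64, 2**64 - 1)
//
// chosen so that -p_sm2 <= z - q * p_sm2 < p_sm2; the masked addition of
// p_sm2 further down then repairs the negative case.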
movq m2, d
movl $1, qshort
addq m3, d
adcq m3, q
shrq $32, d
addq m3, d
shrq $32, d
addq d, q
sbbq $0, q
// Load the next digit so current m to reduce = [m3;m2;m1;m0;d]
movq -8(x,k,8), d
// Now compute the initial pre-reduced [m3;m2;m1;m0;d] = m - p_sm2 * q
// = z - (2^256 - 2^224 - 2^96 + 2^64 - 1) * q
movq q, n0
movq q, n1
shlq $32, n0
shrq $32, n1
addq n0, m2
adcq n1, m3
subq q, n0
sbbq $0, n1
subq q, m3
addq q, d
adcq n0, m0
adcq n1, m1
adcq $0, m2
adcq $0, m3
// Now our top word m3 is either zero or all 1s, and we use this to discriminate
// whether a correction is needed because our result is negative, as a bitmask
// Do a masked addition of p_sm2
movq $0xffffffff00000000, n1
andq m3, n1
movq $0xfffffffeffffffff, n3
andq m3, n3
addq m3, d
adcq n1, m0
adcq m3, m1
adcq n3, m2
// Shuffle registers up and loop
movq m2, m3
movq m1, m2
movq m0, m1
movq d, m0
decq k
jnz Lbignum_mod_sm2_loop
// Write back
Lbignum_mod_sm2_writeback:
movq m0, (z)
movq m1, 8(z)
movq m2, 16(z)
movq m3, 24(z)
// Restore registers and return
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_sm2)
Lbignum_mod_sm2_shortinput:
xorq m0, m0
xorq m1, m1
xorq m2, m2
xorq m3, m3
testq k, k
jz Lbignum_mod_sm2_writeback
movq (%rdx), m0
decq k
jz Lbignum_mod_sm2_writeback
movq 8(%rdx), m1
decq k
jz Lbignum_mod_sm2_writeback
movq 16(%rdx), m2
jmp Lbignum_mod_sm2_writeback
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_deamont_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from almost-Montgomery form, z := (x / 2^256) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_deamont_sm2(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Convert a 4-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 4-digit input will work, with no range restriction.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_sm2)
.text
#define z %rdi
#define x %rsi
#define c %rcx
#define n1 %rax
#define n3 %rdx
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d0;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using %rax, %rcx, %rdx and %rsi
// as temporaries.
// ---------------------------------------------------------------------------
#define montreds(d3,d2,d1,d0) \
movq d0, %rax ; \
shlq $32, %rax ; \
movq d0, %rcx ; \
shrq $32, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rsi ; \
subq d0, %rax ; \
sbbq $0, %rcx ; \
subq %rax, d1 ; \
sbbq %rcx, d2 ; \
sbbq %rdx, d3 ; \
sbbq %rsi, d0
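// In Python terms (a sketch of the macro's effect, not part of the
// original source): since p_sm2 == -1 (mod 2^64), the Montgomery factor
// for the low digit d0 is d0 itself, and one step is
//
//     p = 2**256 - 2**224 - 2**96 + 2**64 - 1
//     def montreds(x):            # x < 2^256; result also < 2^256
//         d0 = x & (2**64 - 1)
//         return (x + d0 * p) >> 64   # the low digit cancels exactly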
S2N_BN_SYMBOL(bignum_deamont_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Set up an initial 4-word window [%r11,%r10,%r9,%r8] = x
movq (x), %r8
movq 8(x), %r9
movq 16(x), %r10
movq 24(x), %r11
// Systematically scroll left doing 1-step reductions. This process
// keeps things inside 4 digits (i.e. < 2^256) at each stage, since
// we have w * p_sm2 + x <= (2^64 - 1) * p_sm2 + (2^256 - 1)
// <= (2^64 - 1) * (2^256 - 1) + (2^256 - 1) <= 2^64 * (2^256 - 1)
montreds(%r11,%r10,%r9,%r8)
montreds(%r8,%r11,%r10,%r9)
montreds(%r9,%r8,%r11,%r10)
montreds(%r10,%r9,%r8,%r11)
// Let [%r11;%r10;%r9;%r8] := [%r11;%r10;%r9;%r8] - p_sm2, saving constants
// n1 and n3 in [n3; -1; n1; -1] = p_sm2 for later use.
subq $-1, %r8
movq $0xffffffff00000000, n1
sbbq n1, %r9
sbbq $-1, %r10
movq $0xfffffffeffffffff, n3
sbbq n3, %r11
// Capture the carry to determine whether to add back p_sm2, and use
// it to create a masked p_sm2' = [n3; c; n1; c]
sbbq c, c
andq c, n1
andq c, n3
// Do the corrective addition and copy to output
addq c, %r8
movq %r8, (z)
adcq n1, %r9
movq %r9, 8(z)
adcq c, %r10
movq %r10, 16(z)
adcq n3, %r11
movq %r11, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_deamont_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/sm2_montjadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates
//
// extern void sm2_montjadd_alt(uint64_t p3[static 12],
// const uint64_t p1[static 12],
// const uint64_t p2[static 12]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjadd_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjadd_alt)
.text
// Size of individual field elements
#define NUMSIZE 32
// Pointer-offset pairs for inputs and outputs
// These assume %rdi = p3, %rsi = p1 and %rbp = p2,
// which needs to be set up explicitly before use.
// By design, none of the code macros modify any of
// these, so we maintain the assignments throughout.
#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define z_2 (2*NUMSIZE)(%rbp)
#define x_3 0(%rdi)
#define y_3 NUMSIZE(%rdi)
#define z_3 (2*NUMSIZE)(%rdi)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define x1a (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define z2sq (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define y1a (NUMSIZE*6)(%rsp)
#define NSPACE NUMSIZE*7
// Corresponds to bignum_montmul_sm2_alt except for registers
#define montmul_sm2(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds to bignum_montsqr_sm2_alt except for registers
#define montsqr_sm2(P0,P1) \
movq P1, %rax ; \
movq %rax, %rbx ; \
mulq %rax; \
movq %rax, %r8 ; \
movq %rdx, %r15 ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
movq %rax, %r9 ; \
movq %rdx, %r10 ; \
movq 0x18+P1, %rax ; \
movq %rax, %r13 ; \
mulq %rbx; \
movq %rax, %r11 ; \
movq %rdx, %r12 ; \
movq 0x10+P1, %rax ; \
movq %rax, %rbx ; \
mulq %r13; \
movq %rax, %r13 ; \
movq %rdx, %r14 ; \
movq P1, %rax ; \
mulq %rbx; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rcx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rcx, %rcx ; \
movq 0x18+P1, %rbx ; \
movq 0x8+P1, %rax ; \
mulq %rbx; \
subq %rcx, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorl %ecx, %ecx ; \
addq %r9, %r9 ; \
adcq %r10, %r10 ; \
adcq %r11, %r11 ; \
adcq %r12, %r12 ; \
adcq %r13, %r13 ; \
adcq %r14, %r14 ; \
adcq %rcx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %r15, %r9 ; \
adcq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %r15, %r15 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
negq %r15; \
adcq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %r15, %r15 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
negq %r15; \
adcq %rax, %r14 ; \
adcq %rcx, %rdx ; \
movq %rdx, %r15 ; \
movq %r8, %rax ; \
shlq $0x20, %rax ; \
movq %r8, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r8, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r9 ; \
sbbq %rcx, %r10 ; \
sbbq %rdx, %r11 ; \
sbbq %rbx, %r8 ; \
movq %r9, %rax ; \
shlq $0x20, %rax ; \
movq %r9, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r9, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r10 ; \
sbbq %rcx, %r11 ; \
sbbq %rdx, %r8 ; \
sbbq %rbx, %r9 ; \
movq %r10, %rax ; \
shlq $0x20, %rax ; \
movq %r10, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r10, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r11 ; \
sbbq %rcx, %r8 ; \
sbbq %rdx, %r9 ; \
sbbq %rbx, %r10 ; \
movq %r11, %rax ; \
shlq $0x20, %rax ; \
movq %r11, %rcx ; \
shrq $0x20, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq %r11, %rax ; \
sbbq $0x0, %rcx ; \
subq %rax, %r8 ; \
sbbq %rcx, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
xorl %eax, %eax ; \
addq %r8, %r12 ; \
adcq %r9, %r13 ; \
adcq %r10, %r14 ; \
adcq %r11, %r15 ; \
adcq %rax, %rax ; \
movl $0x1, %ecx ; \
movl $0xffffffff, %edx ; \
xorl %ebx, %ebx ; \
addq %r12, %rcx ; \
leaq 0x1(%rdx), %r11 ; \
adcq %r13, %rdx ; \
leaq -0x1(%rbx), %r8 ; \
adcq %r14, %rbx ; \
adcq %r15, %r11 ; \
adcq %rax, %r8 ; \
cmovbq %rcx, %r12 ; \
cmovbq %rdx, %r13 ; \
cmovbq %rbx, %r14 ; \
cmovbq %r11, %r15 ; \
movq %r12, P0 ; \
movq %r13, 0x8+P0 ; \
movq %r14, 0x10+P0 ; \
movq %r15, 0x18+P0
// Corresponds exactly to bignum_sub_sm2
#define sub_sm2(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rcx ; \
sbbq 0x8+P2, %rcx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq $0xffffffff00000000, %r10 ; \
sbbq %r11, %r11 ; \
andq %r11, %r10 ; \
movq %r11, %rdx ; \
btr $0x20, %rdx ; \
addq %r11, %rax ; \
movq %rax, P0 ; \
adcq %r10, %rcx ; \
movq %rcx, 0x8+P0 ; \
adcq %r11, %r8 ; \
movq %r8, 0x10+P0 ; \
adcq %rdx, %r9 ; \
movq %r9, 0x18+P0
// Additional macros to help with final multiplexing
#define load4(r0,r1,r2,r3,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3
#define store4(P,r0,r1,r2,r3) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P
#define czload4(r0,r1,r2,r3,P) \
cmovzq P, r0 ; \
cmovzq 8+P, r1 ; \
cmovzq 16+P, r2 ; \
cmovzq 24+P, r3
#define muxload4(r0,r1,r2,r3,P0,P1,P2) \
movq P0, r0 ; \
cmovbq P1, r0 ; \
cmovnbe P2, r0 ; \
movq 8+P0, r1 ; \
cmovbq 8+P1, r1 ; \
cmovnbe 8+P2, r1 ; \
movq 16+P0, r2 ; \
cmovbq 16+P1, r2 ; \
cmovnbe 16+P2, r2 ; \
movq 24+P0, r3 ; \
cmovbq 24+P1, r3 ; \
cmovnbe 24+P2, r3
S2N_BN_SYMBOL(sm2_montjadd_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
// Put the input pointer p2 in %rbp where it lasts throughout the main code.
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
movq %rdx, %rbp
// Main code, just a sequence of basic field operations
// 12 * multiply + 4 * square + 7 * subtract
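// An informal reading of the sequence (all arithmetic mod p_sm2, with
// P1 = (x1,y1,z1) and P2 = (x2,y2,z2); these are the standard Jacobian
// addition formulas that the operations below implement):
//
//     x1a = x1 * z2^2,  x2a = x2 * z1^2,  y1a = y1 * z2^3,  y2a = y2 * z1^3
//     xd = x2a - x1a,  yd = y2a - y1a
//     resx = yd^2 - xd^2 * (x1a + x2a)
//     resy = yd * (xd^2 * x1a - resx) - y1a * xd^3
//     resz = xd * z1 * z2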
montsqr_sm2(z1sq,z_1)
montsqr_sm2(z2sq,z_2)
montmul_sm2(y1a,z_2,y_1)
montmul_sm2(y2a,z_1,y_2)
montmul_sm2(x2a,z1sq,x_2)
montmul_sm2(x1a,z2sq,x_1)
montmul_sm2(y2a,z1sq,y2a)
montmul_sm2(y1a,z2sq,y1a)
sub_sm2(xd,x2a,x1a)
sub_sm2(yd,y2a,y1a)
montsqr_sm2(zz,xd)
montsqr_sm2(ww,yd)
montmul_sm2(zzx1,zz,x1a)
montmul_sm2(zzx2,zz,x2a)
sub_sm2(resx,ww,zzx1)
sub_sm2(t1,zzx2,zzx1)
montmul_sm2(xd,xd,z_1)
sub_sm2(resx,resx,zzx2)
sub_sm2(t2,zzx1,resx)
montmul_sm2(t1,t1,y1a)
montmul_sm2(resz,xd,z_2)
montmul_sm2(t2,yd,t2)
sub_sm2(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0)
// and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0
// and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0)
load4(%r8,%r9,%r10,%r11,z_1)
movq %r8, %rax
movq %r9, %rdx
orq %r10, %rax
orq %r11, %rdx
orq %rdx, %rax
negq %rax
sbbq %rax, %rax
load4(%r12,%r13,%r14,%r15,z_2)
movq %r12, %rbx
movq %r13, %rdx
orq %r14, %rbx
orq %r15, %rdx
orq %rdx, %rbx
negq %rbx
sbbq %rbx, %rbx
cmpq %rax, %rbx
// Multiplex the outputs accordingly, re-using the z's in registers
cmovbq %r8, %r12
cmovbq %r9, %r13
cmovbq %r10, %r14
cmovbq %r11, %r15
czload4(%r12,%r13,%r14,%r15,resz)
muxload4(%rax,%rbx,%rcx,%rdx,resx,x_1,x_2)
muxload4(%r8,%r9,%r10,%r11,resy,y_1,y_2)
// Finally store back the multiplexed values
store4(x_3,%rax,%rbx,%rcx,%rdx)
store4(y_3,%r8,%r9,%r10,%r11)
store4(z_3,%r12,%r13,%r14,%r15)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sm2_montjadd_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_demont_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^256) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_demont_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// This assumes the input is < p_sm2 for correctness. If this is not the case,
// use the variant "bignum_deamont_sm2" instead.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_sm2)
.text
#define z %rdi
#define x %rsi
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d0;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using %rax, %rcx, %rdx and %rsi
// as temporaries.
// ---------------------------------------------------------------------------
#define montreds(d3,d2,d1,d0) \
movq d0, %rax ; \
shlq $32, %rax ; \
movq d0, %rcx ; \
shrq $32, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rsi ; \
subq d0, %rax ; \
sbbq $0, %rcx ; \
subq %rax, d1 ; \
sbbq %rcx, d2 ; \
sbbq %rdx, d3 ; \
sbbq %rsi, d0
S2N_BN_SYMBOL(bignum_demont_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Set up an initial 4-word window [%r11,%r10,%r9,%r8] = x
movq (x), %r8
movq 8(x), %r9
movq 16(x), %r10
movq 24(x), %r11
// Systematically scroll left doing 1-step reductions. This process
// keeps things reduced < p_sm2 at each stage, since we have
// w * p_sm2 + x <= (2^64 - 1) * p_sm2 + (p_sm2 - 1) < 2^64 * p_sm2
montreds(%r11,%r10,%r9,%r8)
montreds(%r8,%r11,%r10,%r9)
montreds(%r9,%r8,%r11,%r10)
montreds(%r10,%r9,%r8,%r11)
// Write back result
movq %r8, (z)
movq %r9, 8(z)
movq %r10, 16(z)
movq %r11, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_demont_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_triple_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Triple modulo p_sm2, z := (3 * x) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_triple_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// The input x can be any 4-digit bignum, not necessarily reduced modulo p_sm2,
// and the result is always fully reduced, i.e. z = (3 * x) mod p_sm2.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_sm2)
.text
#define z %rdi
#define x %rsi
// Main digits of intermediate results
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
// Quotient estimate = top of product + 1
#define q %rdx
#define h %rdx
#define qshort %edx
// Other temporary variables and their short version
#define a %rax
#define c %rcx
#define ashort %eax
S2N_BN_SYMBOL(bignum_triple_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// First do the multiplication by 3, getting z = [h; d3; ...; d0]
// but immediately form the quotient estimate q = h + 1
xorl ashort, ashort
movq (x), q
movq q, d0
adcxq q, q
adoxq q, d0
movq 8(x), q
movq q, d1
adcxq q, q
adoxq q, d1
movq 16(x), q
movq q, d2
adcxq q, q
adoxq q, d2
movq 24(x), q
movq q, d3
adcxq q, q
adoxq q, d3
// For this limited range a simple quotient estimate of q = h + 1 works, where
// h = floor(z / 2^256). Then -p_sm2 <= z - q * p_sm2 < p_sm2, so we just need
// to subtract q * p_sm2 and then if that's negative, add back p_sm2.
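// A quick Python check of that bound (an illustration, not part of the
// original source): here z = 3 * x < 3 * 2^256, so h <= 2 and q <= 3:
//
//     p = 2**256 - 2**224 - 2**96 + 2**64 - 1
//     assert all(-p <= 3*x - ((3*x >> 256) + 1) * p < p
//                for x in (0, p - 1, p, 2**255, 2**256 - 1))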
movl $1, qshort
adcxq a, q
adoxq a, q
// Now compute the initial pre-reduced [h;d3;d2;d1;d0] = z - p_sm2 * q
// = z - (2^256 - 2^224 - 2^96 + 2^64 - 1) * q
movq q, a
shlq $32, a
movq a, c
subq q, a
addq q, d0
adcq a, d1
adcq $0, d2
adcq c, d3
sbbq h, h
notq h
// Now our top word h is either zero or all 1s, and we use this to discriminate
// whether a correction is needed because our result is negative, as a bitmask
// Do a masked addition of p_sm2
movq $0xffffffff00000000, a
andq h, a
movq $0xfffffffeffffffff, c
andq h, c
addq h, d0
movq d0, (z)
adcq a, d1
movq d1, 8(z)
adcq h, d2
movq d2, 16(z)
adcq c, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_triple_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_montsqr_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^256) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_montsqr_sm2(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Does z := (x^2 / 2^256) mod p_sm2, assuming x^2 <= 2^256 * p_sm2, which is
// guaranteed in particular if x < p_sm2 initially (the "intended" case).
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_sm2)
.text
#define z %rdi
#define x %rsi
// Use this fairly consistently for a zero
#define zero %rbp
#define zeroe %ebp
// Add %rdx * m into a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rbx as temporaries
#define mulpadd(high,low,m) \
mulxq m, %rax, %rbx ; \
adcxq %rax, low ; \
adoxq %rbx, high
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d0;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using %rax, %rcx, %rdx and %rbx
// as temporaries.
// ---------------------------------------------------------------------------
#define montreds(d3,d2,d1,d0) \
movq d0, %rax ; \
shlq $32, %rax ; \
movq d0, %rcx ; \
shrq $32, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq d0, %rax ; \
sbbq $0, %rcx ; \
subq %rax, d1 ; \
sbbq %rcx, d2 ; \
sbbq %rdx, d3 ; \
sbbq %rbx, d0
S2N_BN_SYMBOL(bignum_montsqr_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Compute [%r15;%r8] = [00] which we use later, but mainly
// set up an initial window [%r14;...;%r9] = [23;03;01]
movq (x), %rdx
mulxq %rdx, %r8, %r15
mulxq 8(x), %r9, %r10
mulxq 24(x), %r11, %r12
movq 16(x), %rdx
mulxq 24(x), %r13, %r14
// Clear our zero register, and also initialize the flags for the carry chain
xorl zeroe, zeroe
// Chain in the addition of 02 + 12 + 13 to that window (no carry-out possible)
// This gives all the "heterogeneous" terms of the squaring ready to double
mulpadd(%r11,%r10,(x))
mulpadd(%r12,%r11,8(x))
movq 24(x), %rdx
mulpadd(%r13,%r12,8(x))
adcxq zero, %r13
adoxq zero, %r14
adcq zero, %r14
// Double and add to the 00 + 11 + 22 + 33 terms
xorl zeroe, zeroe
adcxq %r9, %r9
adoxq %r15, %r9
movq 8(x), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r10, %r10
adoxq %rax, %r10
adcxq %r11, %r11
adoxq %rdx, %r11
movq 16(x), %rdx
mulxq %rdx, %rax, %rdx
adcxq %r12, %r12
adoxq %rax, %r12
adcxq %r13, %r13
adoxq %rdx, %r13
movq 24(x), %rdx
mulxq %rdx, %rax, %r15
adcxq %r14, %r14
adoxq %rax, %r14
adcxq zero, %r15
adoxq zero, %r15
// Squaring complete. Perform 4 Montgomery steps to rotate the lower half
montreds(%r11,%r10,%r9,%r8)
montreds(%r8,%r11,%r10,%r9)
montreds(%r9,%r8,%r11,%r10)
montreds(%r10,%r9,%r8,%r11)
// Add high and low parts, catching carry in %rax
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
// Load [%r8;%r11;%rbp;%rdx;%rcx] = 2^320 - p_sm2 then do
// [%r8;%r11;%rbp;%rdx;%rcx] = [%rax;%r15;%r14;%r13;%r12] + (2^320 - p_sm2)
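// Digit by digit, 2^320 - p_sm2 = 2^320 - 2^256 + 2^224 + 2^96 - 2^64 + 1,
// i.e. the five 64-bit digits [0xffffffffffffffff; 2^32; 0; 0xffffffff; 1],
// which is what the moves and leaq instructions below construct.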
movl $1, %ecx
movl $0x00000000FFFFFFFF, %edx
xorl %ebp, %ebp
addq %r12, %rcx
leaq 1(%rdx), %r11
adcq %r13, %rdx
leaq -1(%rbp), %r8
adcq %r14, %rbp
adcq %r15, %r11
adcq %rax, %r8
// Now carry is set if r + (2^320 - p_sm2) >= 2^320, i.e. r >= p_sm2
// where r is the pre-reduced form. So conditionally select the
// output accordingly.
cmovcq %rcx, %r12
cmovcq %rdx, %r13
cmovcq %rbp, %r14
cmovcq %r11, %r15
// Write back reduced value
movq %r12, (z)
movq %r13, 8(z)
movq %r14, 16(z)
movq %r15, 24(z)
// Restore saved registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_cmul_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_sm2, z := (c * x) mod p_sm2, assuming
// x reduced
// Inputs c, x[4]; output z[4]
//
// extern void bignum_cmul_sm2(uint64_t z[static 4], uint64_t c,
// const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_sm2)
.text
#define z %rdi
// Temporarily moved here for initial multiply
#define x %rcx
// Likewise this is thrown away after initial multiply
#define m %rdx
#define a %rax
#define c %rcx
#define d0 %rsi
#define d1 %r8
#define d2 %r9
#define d3 %r10
#define h %r11
// Multiplier again for second stage
#define q %rdx
#define qshort %edx
S2N_BN_SYMBOL(bignum_cmul_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Shuffle inputs (since we want multiplier in %rdx)
movq %rdx, x
movq %rsi, m
// Multiply, accumulating the result as ca = 2^256 * h + [d3;d2;d1;d0]
mulxq (x), d0, d1
mulxq 8(x), a, d2
addq a, d1
mulxq 16(x), a, d3
adcq a, d2
mulxq 24(x), a, h
adcq a, d3
adcq $0, h
// Quotient approximation is (h * (1 + 2^32 + 2^64) + d3 + 2^64) >> 64.
// Note that by hypothesis our product is <= (2^64 - 1) * (p_sm2 - 1),
// so there is no need to max this out to avoid wrapping, unlike in the
// more general case of bignum_mod_sm2.
movq d3, a
movl $1, qshort
addq h, a
adcq h, q
shrq $32, a
addq h, a
shrq $32, a
addq a, q
// Now compute the initial pre-reduced [h;d3;d2;d1;d0] = ca - p_sm2 * q
// = ca - (2^256 - 2^224 - 2^96 + 2^64 - 1) * q
movq q, a
movq q, c
shlq $32, a
shrq $32, c
addq a, d3
adcq c, h
subq q, a
sbbq $0, c
subq q, h
addq q, d0
adcq a, d1
adcq c, d2
adcq $0, d3
adcq $0, h
// Now our top word h is either zero or all 1s, and we use this to discriminate
// whether a correction is needed because our result is negative, as a bitmask
// Do a masked addition of p_sm2
movq $0xffffffff00000000, a
andq h, a
movq $0xfffffffeffffffff, c
andq h, c
addq h, d0
movq d0, (z)
adcq a, d1
movq d1, 8(z)
adcq h, d2
movq d2, 16(z)
adcq c, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_sub_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Subtract modulo p_sm2, z := (x - y) mod p_sm2
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_sub_sm2(uint64_t z[static 4], const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_sm2)
.text
#define z %rdi
#define x %rsi
#define y %rdx
#define d0 %rax
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define n1 %r10
#define n3 %rdx
#define c %r11
#define n1short %r10d
S2N_BN_SYMBOL(bignum_sub_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Load and subtract the two inputs as [d3;d2;d1;d0] = x - y (modulo 2^256)
movq (x), d0
subq (y), d0
movq 8(x), d1
sbbq 8(y), d1
movq 16(x), d2
sbbq 16(y), d2
movq 24(x), d3
sbbq 24(y), d3
// Capture the carry, which indicates x < y, and create corresponding masked
// correction p_sm2' = [n3; c; n1; c] to add
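// In Python terms the function computes (a sketch of the intended
// behaviour, assuming both inputs are reduced):
//
//     p = 2**256 - 2**224 - 2**96 + 2**64 - 1
//     def sub_sm2(x, y):          # 0 <= x, y < p
//         d = x - y               # in (-p, p)
//         return d + p if d < 0 else d
//
// Note that when the borrow mask c is all 1s, [n3; c; n1; c] is exactly p_sm2.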
movq $0xffffffff00000000, n1
sbbq c, c
andq c, n1
movq c, n3
btr $32, n3
// Do the corrective addition and copy to output
addq c, d0
movq d0, (z)
adcq n1, d1
movq d1, 8(z)
adcq c, d2
movq d2, 16(z)
adcq n3, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_sub_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_montsqr_sm2_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^256) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_montsqr_sm2_alt(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Does z := (x^2 / 2^256) mod p_sm2, assuming x^2 <= 2^256 * p_sm2, which is
// guaranteed in particular if x < p_sm2 initially (the "intended" case).
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_sm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_sm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_sm2_alt)
.text
#define z %rdi
#define x %rsi
// Add %rbx * m into a register-pair (high,low) maintaining consistent
// carry-catching with carry (negated, as bitmask) and using %rax and %rdx
// as temporaries
#define mulpadd(carry,high,low,m) \
movq m, %rax ; \
mulq %rbx; \
subq carry, %rdx ; \
addq %rax, low ; \
adcq %rdx, high ; \
sbbq carry, carry
// Initial version assuming no carry-in
#define mulpadi(carry,high,low,m) \
movq m, %rax ; \
mulq %rbx; \
addq %rax, low ; \
adcq %rdx, high ; \
sbbq carry, carry
// End version not catching the top carry-out
#define mulpade(carry,high,low,m) \
movq m, %rax ; \
mulq %rbx; \
subq carry, %rdx ; \
addq %rax, low ; \
adcq %rdx, high
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d0;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using %rax, %rcx, %rdx and %rbx
// as temporaries.
// ---------------------------------------------------------------------------
#define montreds(d3,d2,d1,d0) \
movq d0, %rax ; \
shlq $32, %rax ; \
movq d0, %rcx ; \
shrq $32, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq d0, %rax ; \
sbbq $0, %rcx ; \
subq %rax, d1 ; \
sbbq %rcx, d2 ; \
sbbq %rdx, d3 ; \
sbbq %rbx, d0
S2N_BN_SYMBOL(bignum_montsqr_sm2_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Compute [%r15;%r8] = [00] which we use later, but mainly
// set up an initial window [%r14;...;%r9] = [23;03;01]
movq (x), %rax
movq %rax, %rbx
mulq %rax
movq %rax, %r8
movq %rdx, %r15
movq 8(x), %rax
mulq %rbx
movq %rax, %r9
movq %rdx, %r10
movq 24(x), %rax
movq %rax, %r13
mulq %rbx
movq %rax, %r11
movq %rdx, %r12
movq 16(x), %rax
movq %rax, %rbx
mulq %r13
movq %rax, %r13
movq %rdx, %r14
// Chain in the addition of 02 + 12 + 13 to that window (no carry-out possible)
// This gives all the "heterogeneous" terms of the squaring ready to double
mulpadi(%rcx,%r11,%r10,(x))
mulpadd(%rcx,%r12,%r11,8(x))
movq 24(x), %rbx
mulpade(%rcx,%r13,%r12,8(x))
adcq $0, %r14
// Double the window [%r14;...;%r9], catching top carry in %rcx
xorl %ecx, %ecx
addq %r9, %r9
adcq %r10, %r10
adcq %r11, %r11
adcq %r12, %r12
adcq %r13, %r13
adcq %r14, %r14
adcq %rcx, %rcx
// Add to the 00 + 11 + 22 + 33 terms
movq 8(x), %rax
mulq %rax
addq %r15, %r9
adcq %rax, %r10
adcq %rdx, %r11
sbbq %r15, %r15
movq 16(x), %rax
mulq %rax
negq %r15
adcq %rax, %r12
adcq %rdx, %r13
sbbq %r15, %r15
movq 24(x), %rax
mulq %rax
negq %r15
adcq %rax, %r14
adcq %rcx, %rdx
movq %rdx, %r15
// Squaring complete. Perform 4 Montgomery steps to rotate the lower half
montreds(%r11,%r10,%r9,%r8)
montreds(%r8,%r11,%r10,%r9)
montreds(%r9,%r8,%r11,%r10)
montreds(%r10,%r9,%r8,%r11)
// Add high and low parts, catching carry in %rax
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
// Load [%r8;%r11;%rbx;%rdx;%rcx] = 2^320 - p_sm2 then do
// [%r8;%r11;%rbx;%rdx;%rcx] = [%rax;%r15;%r14;%r13;%r12] + (2^320 - p_sm2)
movl $1, %ecx
movl $0x00000000FFFFFFFF, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 1(%rdx), %r11
adcq %r13, %rdx
leaq -1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
// Now carry is set if r + (2^320 - p_sm2) >= 2^320, i.e. r >= p_sm2
// where r is the pre-reduced form. So conditionally select the
// output accordingly.
cmovcq %rcx, %r12
cmovcq %rdx, %r13
cmovcq %rbx, %r14
cmovcq %r11, %r15
// Write back reduced value
movq %r12, (z)
movq %r13, 8(z)
movq %r14, 16(z)
movq %r15, 24(z)
// Restore saved registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_sm2_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_mod_sm2_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_mod_sm2_4(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_sm2_4)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_sm2_4)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_sm2_4)
.text
#define z %rdi
#define x %rsi
#define d0 %rdx
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define n1 %r10
#define n3 %r11
#define c %rax
S2N_BN_SYMBOL(bignum_mod_sm2_4):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the input and subtract to get [d3;d2;d1;d0] = x - p_sm2 (modulo 2^256)
// The constants n1 and n3 in [n3; -1; n1; -1] = p_sm2 are saved for later
movq (x), d0
subq $-1, d0
movq 8(x), d1
movq $0xffffffff00000000, n1
sbbq n1, d1
movq 16(x), d2
sbbq $-1, d2
movq $0xfffffffeffffffff, n3
movq 24(x), d3
sbbq n3, d3
// Capture the carry to determine whether to add back p_sm2, and use
// it to create a masked p_sm2' = [n3; c; n1; c]
sbbq c, c
andq c, n1
andq c, n3
// Do the corrective addition and copy to output
addq c, d0
movq d0, (z)
adcq n1, d1
movq d1, 8(z)
adcq c, d2
movq d2, 16(z)
adcq n3, d3
movq d3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_sm2_4)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_neg_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Negate modulo p_sm2, z := (-x) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_neg_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_sm2)
.text
#define z %rdi
#define x %rsi
#define q %rdx
#define d0 %rax
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define n1 %r10
#define n3 %r11
S2N_BN_SYMBOL(bignum_neg_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the input digits as [d3;d2;d1;d0] and also set a bitmask q
// for the input being nonzero, so that we avoid doing -0 = p_sm2
// and hence maintain strict modular reduction
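// In Python terms (a sketch of the intended behaviour, assuming x reduced):
//
//     p = 2**256 - 2**224 - 2**96 + 2**64 - 1
//     def neg_sm2(x):             # 0 <= x < p
//         return p - x if x != 0 else 0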
movq (x), d0
movq 8(x), d1
movq d0, n1
orq d1, n1
movq 16(x), d2
movq 24(x), d3
movq d2, n3
orq d3, n3
orq n1, n3
negq n3
sbbq q, q
// Load the non-trivial words of p_sm2 = [n3;-1;n1;-1] and mask them with q
movq $0xffffffff00000000, n1
movq $0xfffffffeffffffff, n3
andq q, n1
andq q, n3
// Do the subtraction, using an xor for the first digit and getting the
// overall result as [n3;q;n1;d0], all these tweaks just to avoid moves
xorq q, d0
subq d1, n1
sbbq d2, q
sbbq d3, n3
// Write back
movq d0, (z)
movq n1, 8(z)
movq q, 16(z)
movq n3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_neg_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_montinv_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery inverse modulo p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1
// Input x[4]; output z[4]
//
// extern void bignum_montinv_sm2(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// If the 4-digit input x is coprime to p_sm2, i.e. is not divisible
// by it, returns z < p_sm2 such that x * z == 2^512 (mod p_sm2). This
// is effectively "Montgomery inverse" because if we consider x and z as
// Montgomery forms of X and Z, i.e. x == 2^256 * X and z == 2^256 * Z
// (both mod p_sm2) then X * Z == 1 (mod p_sm2). That is, this function
// gives the analog of the modular inverse bignum_inv_sm2 but with both
// input and output in the Montgomery domain. Note that x does not need
// to be reduced modulo p_sm2, but the output always is. If the input
// is divisible (i.e. is 0 or p_sm2), then there can be no solution to
// the congruence x * z == 2^512 (mod p_sm2), and z = 0 is returned.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montinv_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montinv_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montinv_sm2)
.text
// Size in bytes of a 64-bit word
#define N 8
// Pointer-offset pairs for temporaries on stack
#define f 0(%rsp)
#define g (5*N)(%rsp)
#define u (10*N)(%rsp)
#define v (15*N)(%rsp)
#define tmp (20*N)(%rsp)
#define tmp2 (21*N)(%rsp)
#define i (22*N)(%rsp)
#define d (23*N)(%rsp)
#define mat (24*N)(%rsp)
// Backup for the input pointer
#define res (28*N)(%rsp)
// Total size to reserve on the stack
#define NSPACE 30*N
// Syntactic variants to make x86_att version simpler to generate
#define F 0
#define G (5*N)
#define U (10*N)
#define V (15*N)
#define MAT (24*N)
#define ff (%rsp)
#define gg (5*N)(%rsp)
// ---------------------------------------------------------------------------
// Core signed almost-Montgomery reduction macro from u[4..0] to u[3..0].
// ---------------------------------------------------------------------------
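// Note p_sm2 == -1 (mod 2^64), so -1/p_sm2 == 1 (mod 2^64) and the
// Montgomery quotient for the bottom digit d0 is d0 itself; the digit
// cascade below then computes (x + d0 * p_sm2) / 2^64 via the identity
//     (p_sm2 + 1) / 2^64 = 2^192 - 2^160 - 2^32 + 1.
// (Editorial note summarizing the macro that follows.)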
#define amontred(P) \
/* We only know the input is -2^316 < x < 2^316. To do traditional */ \
/* unsigned Montgomery reduction, start by adding 2^61 * p_sm2. */ \
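/* (These five immediates are the 64-bit words of 2^61 * p_sm2.)      */ \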
movq $0xe000000000000000, %r8 ; \
addq P, %r8 ; \
movq $0x1fffffffffffffff, %r9 ; \
adcq 8+P, %r9 ; \
movq $0xffffffffe0000000, %r10 ; \
adcq 16+P, %r10 ; \
movq $0xffffffffffffffff, %r11 ; \
adcq 24+P, %r11 ; \
movq $0x1fffffffdfffffff, %r12 ; \
adcq 32+P, %r12 ; \
/* Let [%rcx;%rbx] = 2^32 * d0 and [%rdx;%rax] = (2^32-1) * d0 */ \
movq %r8, %rbx ; \
movq %r8, %rcx ; \
shrq $32, %rcx ; \
shlq $32, %rbx ; \
movl $0xffffffff, %eax ; \
mulq %r8; \
/* Now [%r12;%r11;%r10;%r9] := [%r8;%r11;%r10;%r9] - [%rcx;%rbx;%rdx;%rax] */ \
subq %rax, %r9 ; \
sbbq %rdx, %r10 ; \
sbbq %rbx, %r11 ; \
sbbq %rcx, %r8 ; \
addq %r8, %r12 ; \
/* Now capture carry and subtract p_sm2 if set (almost-Montgomery) */ \
sbbq %rax, %rax ; \
movl $0xffffffff, %ebx ; \
notq %rbx; \
andq %rax, %rbx ; \
movq %rax, %rdx ; \
btr $32, %rdx ; \
subq %rax, %r9 ; \
movq %r9, P ; \
sbbq %rbx, %r10 ; \
movq %r10, 8+P ; \
sbbq %rax, %r11 ; \
movq %r11, 16+P ; \
sbbq %rdx, %r12 ; \
movq %r12, 24+P
// Very similar to a subroutine call to the s2n-bignum word_divstep59.
// But different in register usage and returning the final matrix as
//
// [ %r8 %r10]
// [ %r12 %r14]
//
// and also returning the matrix still negated (which doesn't matter)
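// For reference, one step of the Bernstein-Yang "divstep" recurrence that
// this macro applies 59 times (three fully unrolled blocks) to packed
// low-bit windows of d, f and g. A C sketch of the abstract recurrence,
// not of this register-level packing (the names are ours):
//
//   #include <stdint.h>
//   typedef struct { int64_t d, f, g; } divstate;
//   static divstate divstep(divstate s) {
//       if (s.d > 0 && (s.g & 1))                 // swap-and-subtract case
//           return (divstate){ 1 - s.d, s.g, (s.g - s.f) / 2 };
//       int64_t t = s.g + ((s.g & 1) ? s.f : 0);  // f odd => t is even
//       return (divstate){ 1 + s.d, s.f, t / 2 };
//   }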
#define divstep59(din,fin,gin) \
movq din, %rsi ; \
movq fin, %rdx ; \
movq gin, %rcx ; \
movq %rdx, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq $0xfffffffffffffffe, %rax ; \
xorl %ebp, %ebp ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %rdx ; \
leaq (%rcx,%rax), %rdi ; \
shlq $0x16, %rdx ; \
shlq $0x16, %rdi ; \
sarq $0x2b, %rdx ; \
sarq $0x2b, %rdi ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %rbx ; \
leaq (%rcx,%rax), %rcx ; \
sarq $0x2a, %rbx ; \
sarq $0x2a, %rcx ; \
movq %rdx, MAT(%rsp) ; \
movq %rbx, MAT+0x8(%rsp) ; \
movq %rdi, MAT+0x10(%rsp) ; \
movq %rcx, MAT+0x18(%rsp) ; \
movq fin, %r12 ; \
imulq %r12, %rdi ; \
imulq %rdx, %r12 ; \
movq gin, %r13 ; \
imulq %r13, %rbx ; \
imulq %rcx, %r13 ; \
addq %rbx, %r12 ; \
addq %rdi, %r13 ; \
sarq $0x14, %r12 ; \
sarq $0x14, %r13 ; \
movq %r12, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
movq %r13, %rcx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq $0xfffffffffffffffe, %rax ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %r8 ; \
leaq (%rcx,%rax), %r10 ; \
shlq $0x16, %r8 ; \
shlq $0x16, %r10 ; \
sarq $0x2b, %r8 ; \
sarq $0x2b, %r10 ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %r15 ; \
leaq (%rcx,%rax), %r11 ; \
sarq $0x2a, %r15 ; \
sarq $0x2a, %r11 ; \
movq %r13, %rbx ; \
movq %r12, %rcx ; \
imulq %r8, %r12 ; \
imulq %r15, %rbx ; \
addq %rbx, %r12 ; \
imulq %r11, %r13 ; \
imulq %r10, %rcx ; \
addq %rcx, %r13 ; \
sarq $0x14, %r12 ; \
sarq $0x14, %r13 ; \
movq %r12, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
movq %r13, %rcx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq MAT(%rsp), %rax ; \
imulq %r8, %rax ; \
movq MAT+0x10(%rsp), %rdx ; \
imulq %r15, %rdx ; \
imulq MAT+0x8(%rsp), %r8 ; \
imulq MAT+0x18(%rsp), %r15 ; \
addq %r8, %r15 ; \
leaq (%rax,%rdx), %r9 ; \
movq MAT(%rsp), %rax ; \
imulq %r10, %rax ; \
movq MAT+0x10(%rsp), %rdx ; \
imulq %r11, %rdx ; \
imulq MAT+0x8(%rsp), %r10 ; \
imulq MAT+0x18(%rsp), %r11 ; \
addq %r10, %r11 ; \
leaq (%rax,%rdx), %r13 ; \
movq $0xfffffffffffffffe, %rax ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %r8 ; \
leaq (%rcx,%rax), %r12 ; \
shlq $0x15, %r8 ; \
shlq $0x15, %r12 ; \
sarq $0x2b, %r8 ; \
sarq $0x2b, %r12 ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %r10 ; \
leaq (%rcx,%rax), %r14 ; \
sarq $0x2b, %r10 ; \
sarq $0x2b, %r14 ; \
movq %r9, %rax ; \
imulq %r8, %rax ; \
movq %r13, %rdx ; \
imulq %r10, %rdx ; \
imulq %r15, %r8 ; \
imulq %r11, %r10 ; \
addq %r8, %r10 ; \
leaq (%rax,%rdx), %r8 ; \
movq %r9, %rax ; \
imulq %r12, %rax ; \
movq %r13, %rdx ; \
imulq %r14, %rdx ; \
imulq %r15, %r12 ; \
imulq %r11, %r14 ; \
addq %r12, %r14 ; \
leaq (%rax,%rdx), %r12
S2N_BN_SYMBOL(bignum_montinv_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room for temporaries
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Save the return pointer for the end so we can overwrite %rdi later
movq %rdi, res
// Create constant [%rdx;%rcx;%rbx;%rax] = p_sm2 and copy it into the variable f
// including the 5th zero digit
xorl %ebp, %ebp
leaq -1(%rbp), %rax
movl $0x00000000ffffffff, %ebx
notq %rbx
movq %rax, %rcx
movq %rax, %rdx
btr $32, %rdx
movq %rax, F(%rsp)
movq %rbx, F+8(%rsp)
movq %rcx, F+16(%rsp)
movq %rdx, F+24(%rsp)
movq %rbp, F+32(%rsp)
// Now reduce the input modulo p_sm2, first negating the constant to get
// [%rdx;%rcx;%rbx;%rax] = 2^256 - p_sm2, adding it to x and hence getting
// the comparison x < p_sm2 <=> (2^256 - p_sm2) + x < 2^256 and choosing
// g accordingly.
movq (%rsi), %r8
movq 8(%rsi), %r9
movq 16(%rsi), %r10
movq 24(%rsi), %r11
movl $1, %eax
notq %rbx
xorl %ecx, %ecx
notq %rdx
addq %r8, %rax
adcq %r9, %rbx
adcq %r10, %rcx
adcq %r11, %rdx
cmovncq %r8, %rax
cmovncq %r9, %rbx
cmovncq %r10, %rcx
cmovncq %r11, %rdx
movq %rax, G(%rsp)
movq %rbx, G+8(%rsp)
movq %rcx, G+16(%rsp)
movq %rdx, G+24(%rsp)
xorl %eax, %eax
movq %rax, G+32(%rsp)
// Also maintain reduced < 2^256 vector [u,v] such that
// [f,g] == x * 2^{5*i-562} * [u,v] (mod p_sm2)
// starting with [p_sm2,x] == x * 2^{5*0-562} * [0,2^562] (mod p_sm2)
// The weird-looking 5*i modifications come in because we are doing
// 64-bit word-sized Montgomery reductions at each stage, which is
// 5 bits more than the 59-bit requirement to keep things stable.
// After the 10th and last iteration and sign adjustment, when
// f == 1 for in-scope cases, we have x * 2^{50-562} * u == 1, i.e.
// x * u == 2^512 as required.
xorl %eax, %eax
movq %rax, U(%rsp)
movq %rax, U+8(%rsp)
movq %rax, U+16(%rsp)
movq %rax, U+24(%rsp)
movq $0x000c000000100000, %rax
movq %rax, V(%rsp)
movq $0x000bfffffff80000, %rax
movq %rax, V+8(%rsp)
movq $0x00040000000c0000, %rax
movq %rax, V+16(%rsp)
movq $0x0018000000040000, %rax
movq %rax, V+24(%rsp)
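// (Per the starting invariant described above, this constant is
// 2^562 mod p_sm2.)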
// Start of main loop. We jump into the middle so that the divstep
// portion is common to the special tenth iteration after a uniform
// first 9.
movq $10, i
movq $1, d
jmp Lbignum_montinv_sm2_midloop
Lbignum_montinv_sm2_loop:
// Separate out the matrix into sign-magnitude pairs
movq %r8, %r9
sarq $63, %r9
xorq %r9, %r8
subq %r9, %r8
movq %r10, %r11
sarq $63, %r11
xorq %r11, %r10
subq %r11, %r10
movq %r12, %r13
sarq $63, %r13
xorq %r13, %r12
subq %r13, %r12
movq %r14, %r15
sarq $63, %r15
xorq %r15, %r14
subq %r15, %r14
// Adjust the initial values to allow for complement instead of negation
// This initial offset is the same for [f,g] and [u,v] compositions.
// Save it in temporary storage for the [u,v] part and do [f,g] first.
movq %r8, %rax
andq %r9, %rax
movq %r10, %rdi
andq %r11, %rdi
addq %rax, %rdi
movq %rdi, tmp
movq %r12, %rax
andq %r13, %rax
movq %r14, %rsi
andq %r15, %rsi
addq %rax, %rsi
movq %rsi, tmp2
// Now the computation of the updated f and g values. This maintains a
// 2-word carry between stages so we can conveniently insert the shift
// right by 59 before storing back, and not overwrite digits we need
// again of the old f and g values.
//
// Digit 0 of [f,g]
xorl %ebx, %ebx
movq F(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rdi
adcq %rdx, %rbx
movq G(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rdi
adcq %rdx, %rbx
xorl %ebp, %ebp
movq F(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rsi
adcq %rdx, %rbp
movq G(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
// Digit 1 of [f,g]
xorl %ecx, %ecx
movq F+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq G+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
shrdq $59, %rbx, %rdi
movq %rdi, F(%rsp)
xorl %edi, %edi
movq F+N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rbp
adcq %rdx, %rdi
movq G+N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rdi
shrdq $59, %rbp, %rsi
movq %rsi, G(%rsp)
// Digit 2 of [f,g]
xorl %esi, %esi
movq F+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rsi
movq G+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rsi
shrdq $59, %rcx, %rbx
movq %rbx, F+N(%rsp)
xorl %ebx, %ebx
movq F+2*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rdi
adcq %rdx, %rbx
movq G+2*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rdi
adcq %rdx, %rbx
shrdq $59, %rdi, %rbp
movq %rbp, G+N(%rsp)
// Digits 3 and 4 of [f,g]
movq F+3*N(%rsp), %rax
xorq %r9, %rax
movq F+4*N(%rsp), %rbp
xorq %r9, %rbp
andq %r8, %rbp
negq %rbp
mulq %r8
addq %rax, %rsi
adcq %rdx, %rbp
movq G+3*N(%rsp), %rax
xorq %r11, %rax
movq G+4*N(%rsp), %rdx
xorq %r11, %rdx
andq %r10, %rdx
subq %rdx, %rbp
mulq %r10
addq %rax, %rsi
adcq %rdx, %rbp
shrdq $59, %rsi, %rcx
movq %rcx, F+2*N(%rsp)
shrdq $59, %rbp, %rsi
sarq $59, %rbp
movq F+3*N(%rsp), %rax
movq %rsi, F+3*N(%rsp)
movq F+4*N(%rsp), %rsi
movq %rbp, F+4*N(%rsp)
xorq %r13, %rax
xorq %r13, %rsi
andq %r12, %rsi
negq %rsi
mulq %r12
addq %rax, %rbx
adcq %rdx, %rsi
movq G+3*N(%rsp), %rax
xorq %r15, %rax
movq G+4*N(%rsp), %rdx
xorq %r15, %rdx
andq %r14, %rdx
subq %rdx, %rsi
mulq %r14
addq %rax, %rbx
adcq %rdx, %rsi
shrdq $59, %rbx, %rdi
movq %rdi, G+2*N(%rsp)
shrdq $59, %rsi, %rbx
movq %rbx, G+3*N(%rsp)
sarq $59, %rsi
movq %rsi, G+4*N(%rsp)
// Get the initial carries back from storage and do the [u,v] accumulation
movq tmp, %rbx
movq tmp2, %rbp
// Digit 0 of [u,v]
xorl %ecx, %ecx
movq U(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V(%rsp)
// Digit 1 of [u,v]
xorl %ebx, %ebx
movq U+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rbx
xorl %ebp, %ebp
movq U+N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rcx, U+N(%rsp)
addq %rax, %rsi
adcq %rdx, %rbp
movq V+N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
movq %rsi, V+N(%rsp)
// Digit 2 of [u,v]
xorl %ecx, %ecx
movq U+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U+2*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U+2*N(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V+2*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V+2*N(%rsp)
// Digits 3 and 4 of u (top is unsigned)
movq U+3*N(%rsp), %rax
xorq %r9, %rax
movq %r9, %rbx
andq %r8, %rbx
negq %rbx
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+3*N(%rsp), %rax
xorq %r11, %rax
movq %r11, %rdx
andq %r10, %rdx
subq %rdx, %rbx
mulq %r10
addq %rax, %rcx
adcq %rbx, %rdx
// Preload for last use of old u digit 3
movq U+3*N(%rsp), %rax
movq %rcx, U+3*N(%rsp)
movq %rdx, U+4*N(%rsp)
// Digits 3 and 4 of v (top is unsigned)
xorq %r13, %rax
movq %r13, %rcx
andq %r12, %rcx
negq %rcx
mulq %r12
addq %rax, %rsi
adcq %rdx, %rcx
movq V+3*N(%rsp), %rax
xorq %r15, %rax
movq %r15, %rdx
andq %r14, %rdx
subq %rdx, %rcx
mulq %r14
addq %rax, %rsi
adcq %rcx, %rdx
movq %rsi, V+3*N(%rsp)
movq %rdx, V+4*N(%rsp)
// Montgomery reduction of u
amontred(u)
// Montgomery reduction of v
amontred(v)
Lbignum_montinv_sm2_midloop:
divstep59(d,ff,gg)
movq %rsi, d
// Next iteration
decq i
jnz Lbignum_montinv_sm2_loop
// The 10th and last iteration does not need anything except the
// u value and the sign of f; the latter can be obtained from the
// lowest word of f. So it's done differently from the main loop.
// Find the sign of the new f. For this we just need one digit
// since we know (for in-scope cases) that f is either +1 or -1.
// We don't explicitly shift right by 59 either, but looking at
// bit 63 (or any bit >= 60) of the unshifted result is enough
// to distinguish -1 from +1; this is then made into a mask.
movq F(%rsp), %rax
movq G(%rsp), %rcx
imulq %r8, %rax
imulq %r10, %rcx
addq %rcx, %rax
sarq $63, %rax
// Now separate out the matrix into sign-magnitude pairs
// and adjust each one based on the sign of f.
//
// Note that at this point we expect |f|=1 and we got its
// sign above, so then since [f,0] == x * 2^{-512} * [u,v] (mod p_sm2)
// we want to flip the sign of u according to that of f.
movq %r8, %r9
sarq $63, %r9
xorq %r9, %r8
subq %r9, %r8
xorq %rax, %r9
movq %r10, %r11
sarq $63, %r11
xorq %r11, %r10
subq %r11, %r10
xorq %rax, %r11
movq %r12, %r13
sarq $63, %r13
xorq %r13, %r12
subq %r13, %r12
xorq %rax, %r13
movq %r14, %r15
sarq $63, %r15
xorq %r15, %r14
subq %r15, %r14
xorq %rax, %r15
// Adjust the initial value to allow for complement instead of negation
movq %r8, %rax
andq %r9, %rax
movq %r10, %r12
andq %r11, %r12
addq %rax, %r12
// Digit 0 of [u]
xorl %r13d, %r13d
movq U(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r12
adcq %rdx, %r13
movq V(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r12
adcq %rdx, %r13
// Digit 1 of [u]
xorl %r14d, %r14d
movq U+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r13
adcq %rdx, %r14
movq V+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r13
adcq %rdx, %r14
// Digit 2 of [u]
xorl %r15d, %r15d
movq U+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r14
adcq %rdx, %r15
movq V+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r14
adcq %rdx, %r15
// Digits 3 and 4 of u (top is unsigned)
movq U+3*N(%rsp), %rax
xorq %r9, %rax
andq %r8, %r9
negq %r9
mulq %r8
addq %rax, %r15
adcq %rdx, %r9
movq V+3*N(%rsp), %rax
xorq %r11, %rax
movq %r11, %rdx
andq %r10, %rdx
subq %rdx, %r9
mulq %r10
addq %rax, %r15
adcq %rdx, %r9
// Store back and Montgomery reduce u
movq %r12, U(%rsp)
movq %r13, U+N(%rsp)
movq %r14, U+2*N(%rsp)
movq %r15, U+3*N(%rsp)
movq %r9, U+4*N(%rsp)
amontred(u)
// Perform final strict reduction mod p_sm2 and copy to output
movq U(%rsp), %r8
movq U+N(%rsp), %r9
movq U+2*N(%rsp), %r10
movq U+3*N(%rsp), %r11
movl $1, %eax
movl $0x00000000ffffffff, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
bts $32, %rdx
addq %r8, %rax
adcq %r9, %rbx
adcq %r10, %rcx
adcq %r11, %rdx
cmovncq %r8, %rax
cmovncq %r9, %rbx
cmovncq %r10, %rcx
cmovncq %r11, %rdx
movq res, %rdi
movq %rax, (%rdi)
movq %rbx, N(%rdi)
movq %rcx, 2*N(%rdi)
movq %rdx, 3*N(%rdi)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montinv_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_tomont_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^256 * x) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_tomont_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_sm2)
.text
#define z %rdi
#define x %rsi
#define m0 %r8
#define m1 %r9
#define m2 %r10
#define m3 %r11
#define q %rax
#define n1 %rcx
#define n3 %rdx
#define qshort %eax
// ----------------------------------------------------------------------------
// Core "x |-> (2^64 * x) mod p_sm2" macro, with x assumed to be < p_sm2.
// Input is [d3;d2;d1;d0] and output is [d2;d1;d0;q] where q is a fixed
// quotient estimate register (%rax), so the registers get shuffled.
// ----------------------------------------------------------------------------
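// Editorial gloss on the quotient estimate in the macro below: since
//   2^256 == 2^224 + 2^96 - 2^64 + 1 (mod p_sm2),
// 1/p_sm2 is well enough approximated by 2^-256 * (1 + 2^-32 + 2^-64), and
// the extra "+ 2^64" biases the truncations so that any residual error
// surfaces as a negative top word, repaired by the final masked corrective
// addition.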
#define modstep_sm2(d3,d2,d1,d0) \
/* Writing the input, with lower zero digit appended, as */ \
/* z = 2^256 * d3 + 2^192 * d2 + t, quotient approximation is */ \
/* MIN ((d3 * (1 + 2^32 + 2^64) + d2 + 2^64) >> 64) (2^64 - 1) */ \
movq d2, n1 ; \
movl $1, qshort ; \
addq d3, n1 ; \
adcq d3, q ; \
shrq $32, n1 ; \
addq d3, n1 ; \
shrq $32, n1 ; \
addq n1, q ; \
sbbq $0, q ; \
/* Compute the pre-reduced [d3;d2;d1;d0;q] = z - p_sm2 * q */ \
/* = z - (2^256 - 2^224 - 2^96 + 2^64 - 1) * q */ \
movq q, n1 ; \
movq q, n3 ; \
shlq $32, n1 ; \
shrq $32, n3 ; \
addq n1, d2 ; \
adcq n3, d3 ; \
subq q, n1 ; \
sbbq $0, n3 ; \
subq q, d3 ; \
addq n1, d0 ; \
adcq n3, d1 ; \
adcq $0, d2 ; \
adcq $0, d3 ; \
/* Corrective addition with top word d3 as a bitmask */ \
movq $0xffffffff00000000, n1 ; \
andq d3, n1 ; \
movq $0xfffffffeffffffff, n3 ; \
andq d3, n3 ; \
addq d3, q ; \
adcq n1, d0 ; \
adcq d3, d1 ; \
adcq n3, d2
S2N_BN_SYMBOL(bignum_tomont_sm2):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the inputs
movq (x), m0
movq 8(x), m1
movq 16(x), m2
movq 24(x), m3
// Load non-trivial digits [n3; -1; n1; -1] = p_sm2 and do a conditional
// subtraction to reduce the four starting digits [m3;m2;m1;m0] modulo p_sm2
subq $-1, m0
movq $0xffffffff00000000, n1
sbbq n1, m1
movq $0xfffffffeffffffff, n3
sbbq $-1, m2
sbbq n3, m3
sbbq q, q
andq q, n1
andq q, n3
addq q, m0
adcq n1, m1
adcq q, m2
adcq n3, m3
// Now do 4 iterations of 5->4 word modular reduction
modstep_sm2(m3,m2,m1,m0)
movq q, m3
modstep_sm2(m2,m1,m0,m3)
movq q, m2
modstep_sm2(m1,m0,m3,m2)
movq q, m1
modstep_sm2(m0,m3,m2,m1)
// Write back result and return
movq q, (z)
movq m1, 8(z)
movq m2, 16(z)
movq m3, 24(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_tomont_sm2)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sm2/bignum_montmul_sm2_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^256) mod p_sm2
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_montmul_sm2_alt(uint64_t z[static 4],
// const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Does z := (2^{-256} * x * y) mod p_sm2, assuming that the inputs x and y
// satisfy x * y <= 2^256 * p_sm2 (in particular this is true if we are in
// the "usual" case x < p_sm2 and y < p_sm2).
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_sm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_sm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_sm2_alt)
.text
#define z %rdi
#define x %rsi
// We move the y argument here so we can use %rdx for multipliers
#define y %rcx
// Macro for the key "multiply and add to (c,h,l)" step
#define combadd(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A minutely shorter form for when c = 0 initially
#define combadz(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq c, c
// A short form where we don't expect a top carry
#define combads(h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h
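// For reference, a C model of the "multiply and add to (c,h,l)" step (an
// illustrative sketch; combadd_model is our name and unsigned __int128 is
// the GCC/Clang extension):
//
//   #include <stdint.h>
//   static void combadd_model(uint64_t *c, uint64_t *h, uint64_t *l,
//                             uint64_t a, uint64_t b) {
//       unsigned __int128 p = (unsigned __int128)a * b;
//       unsigned __int128 t = (unsigned __int128)*l + (uint64_t)p;
//       *l = (uint64_t)t;
//       t = (t >> 64) + *h + (uint64_t)(p >> 64);
//       *h = (uint64_t)t;
//       *c += (uint64_t)(t >> 64); // combadz: c starts 0; combads: dropped
//   }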
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d0;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using %rax, %rcx, %rdx and %rbx
// as temporaries.
// ---------------------------------------------------------------------------
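// Note p_sm2 == -1 (mod 2^64), so the Montgomery quotient for the bottom
// digit is d0 itself, and one step computes (x + d0 * p_sm2) / 2^64 using
//     (p_sm2 + 1) / 2^64 = 2^192 - 2^160 - 2^32 + 1,
// i.e. the result is [d3;d2;d1] + d0 * (2^192 - 2^160 - 2^32 + 1), which
// is exactly what the subtraction cascade forms. (Editorial note.)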
#define montreds(d3,d2,d1,d0) \
movq d0, %rax ; \
shlq $32, %rax ; \
movq d0, %rcx ; \
shrq $32, %rcx ; \
movq %rax, %rdx ; \
movq %rcx, %rbx ; \
subq d0, %rax ; \
sbbq $0, %rcx ; \
subq %rax, d1 ; \
sbbq %rcx, d2 ; \
sbbq %rdx, d3 ; \
sbbq %rbx, d0
S2N_BN_SYMBOL(bignum_montmul_sm2_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
// Copy y into a safe register to start with
movq %rdx, y
// Start the window as [%r10;%r9;%r8] with 00 product
movq (x), %rax
mulq (y)
movq %rax, %r8
movq %rdx, %r9
xorq %r10, %r10
// Column 1
xorq %r11, %r11
combads(%r10,%r9,(x),8(y))
combadz(%r11,%r10,%r9,8(x),(y))
// Column 2
xorq %r12, %r12
combadz(%r12,%r11,%r10,(x),16(y))
combadd(%r12,%r11,%r10,8(x),8(y))
combadd(%r12,%r11,%r10,16(x),(y))
// Column 3
xorq %r13, %r13
combadz(%r13,%r12,%r11,(x),24(y))
combadd(%r13,%r12,%r11,8(x),16(y))
combadd(%r13,%r12,%r11,16(x),8(y))
combadd(%r13,%r12,%r11,24(x),(y))
// Column 4
xorq %r14, %r14
combadz(%r14,%r13,%r12,8(x),24(y))
combadd(%r14,%r13,%r12,16(x),16(y))
combadd(%r14,%r13,%r12,24(x),8(y))
// Column 5
xorq %r15, %r15
combadz(%r15,%r14,%r13,16(x),24(y))
combadd(%r15,%r14,%r13,24(x),16(y))
// Final work for columns 6 and 7
movq 24(x), %rax
mulq 24(y)
addq %rax, %r14
adcq %rdx, %r15
// Multiplication complete. Perform 4 Montgomery steps to rotate the lower half
montreds(%r11,%r10,%r9,%r8)
montreds(%r8,%r11,%r10,%r9)
montreds(%r9,%r8,%r11,%r10)
montreds(%r10,%r9,%r8,%r11)
// Add high and low parts, catching carry in %rax
xorl %eax, %eax
addq %r8, %r12
adcq %r9, %r13
adcq %r10, %r14
adcq %r11, %r15
adcq %rax, %rax
// Load [%r8;%r11;%rbx;%rdx;%rcx] = 2^320 - p_sm2 then do
// [%r8;%r11;%rbx;%rdx;%rcx] = [%rax;%r15;%r14;%r13;%r12] + (2^320 - p_sm2)
movl $1, %ecx
movl $0x00000000FFFFFFFF, %edx
xorl %ebx, %ebx
addq %r12, %rcx
leaq 1(%rdx), %r11
adcq %r13, %rdx
leaq -1(%rbx), %r8
adcq %r14, %rbx
adcq %r15, %r11
adcq %rax, %r8
// Now carry is set if r + (2^320 - p_sm2) >= 2^320, i.e. r >= p_sm2
// where r is the pre-reduced form. So conditionally select the
// output accordingly.
cmovcq %rcx, %r12
cmovcq %rdx, %r13
cmovcq %rbx, %r14
cmovcq %r11, %r15
// Write back reduced value
movq %r12, (z)
movq %r13, 8(z)
movq %r14, 16(z)
movq %r15, 24(z)
// Restore saved registers and return
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montmul_sm2_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/sha3/sha3_keccak_f1600.S
// Copyright (c) 2017-2024 The OpenSSL Project Authors
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Written by Andy Polyakov, @dot-asm, initially for use in the OpenSSL
// project.
// ----------------------------------------------------------------------------
// Keccak-f1600 permutation for SHA3
// Input a[25], rc[24]; output a[25]
//
// Keccak-f1600 permutation operation is at the core of SHA3 and SHAKE
// and is fully specified here:
//
// https://keccak.team/files/Keccak-reference-3.0.pdf
//
// extern void sha3_keccak_f1600(uint64_t a[25], const uint64_t rc[24]);
//
// Standard x86-64 ABI: RDI = a, RSI = rc
// Microsoft x64 ABI: RCX = a, RDX = rc
// ----------------------------------------------------------------------------
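// For orientation: the state is a 5x5 array of 64-bit lanes, and each of
// the 24 rounds applies the theta, rho, pi, chi and iota steps; the main
// loop below handles two rounds per iteration, ping-ponging the state
// between the caller's buffer and a stack copy. The nonlinear chi step,
// whose AND/OR/NOT scheduling dominates the round code, is, in a C-style
// sketch (indices mod 5; our notation, not part of the original source):
//
//   for (int y = 0; y < 5; y++)
//       for (int x = 0; x < 5; x++)
//           B[x][y] = A[x][y] ^ (~A[(x+1)%5][y] & A[(x+2)%5][y]);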
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak_f1600)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak_f1600)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak_f1600)
.text
S2N_BN_SYMBOL(sha3_keccak_f1600):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(208)
notq 0x08(%rdi)
notq 0x10(%rdi)
notq 0x40(%rdi)
notq 0x60(%rdi)
notq 0x88(%rdi)
notq 0xa0(%rdi)
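// (The six complemented lanes above, restored before returning, look like
// the standard lane-complementing transform from the Keccak implementation
// notes, trading NOTs inside chi for fixed pre/post complements; this
// reading is editorial, not an original comment.)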
leaq (%rsp), %r15
movq 0xa0(%rdi), %rax
movq 0xa8(%rdi), %rbx
movq 0xb0(%rdi), %rcx
movq 0xb8(%rdi), %rdx
movq 0xc0(%rdi), %rbp
mov $0x0, %r8
Lsha3_keccak_f1600_loop:
movq %r8, 0xc8(%rsp)
movq (%rdi), %r8
movq 0x30(%rdi), %r9
movq 0x60(%rdi), %r10
movq 0x90(%rdi), %r11
xorq 0x10(%rdi), %rcx
xorq 0x18(%rdi), %rdx
xorq %r8, %rax
xorq 0x08(%rdi), %rbx
xorq 0x38(%rdi), %rcx
xorq 0x28(%rdi), %rax
movq %rbp, %r12
xorq 0x20(%rdi), %rbp
xorq %r10, %rcx
xorq 0x50(%rdi), %rax
xorq 0x40(%rdi), %rdx
xorq %r9, %rbx
xorq 0x48(%rdi), %rbp
xorq 0x88(%rdi), %rcx
xorq 0x78(%rdi), %rax
xorq 0x68(%rdi), %rdx
xorq 0x58(%rdi), %rbx
xorq 0x70(%rdi), %rbp
movq %rcx, %r13
rol $1, %rcx
xorq %rax, %rcx
xorq %r11, %rdx
rol $1, %rax
xorq %rdx, %rax
xorq 0x80(%rdi), %rbx
rol $1, %rdx
xorq %rbx, %rdx
xorq 0x98(%rdi), %rbp
rol $1, %rbx
xorq %rbp, %rbx
rol $1, %rbp
xorq %r13, %rbp
xorq %rcx, %r9
xorq %rdx, %r10
rol $0x2c, %r9
xorq %rbp, %r11
xorq %rax, %r12
rol $0x2b, %r10
xorq %rbx, %r8
movq %r9, %r13
rol $0x15, %r11
orq %r10, %r9
xorq %r8, %r9
rol $0xe, %r12
xorq (%rsi), %r9
movq %r12, %r14
andq %r11, %r12
movq %r9, (%r15)
xorq %r10, %r12
notq %r10
movq %r12, 0x10(%r15)
orq %r11, %r10
movq 0xb0(%rdi), %r12
xorq %r13, %r10
movq %r10, 0x08(%r15)
andq %r8, %r13
movq 0x48(%rdi), %r9
xorq %r14, %r13
movq 0x50(%rdi), %r10
movq %r13, 0x20(%r15)
orq %r8, %r14
movq 0x18(%rdi), %r8
xorq %r11, %r14
movq 0x80(%rdi), %r11
movq %r14, 0x18(%r15)
xorq %rbp, %r8
xorq %rdx, %r12
rol $0x1c, %r8
xorq %rcx, %r11
xorq %rax, %r9
rol $0x3d, %r12
rol $0x2d, %r11
xorq %rbx, %r10
rol $0x14, %r9
movq %r8, %r13
orq %r12, %r8
rol $0x3, %r10
xorq %r11, %r8
movq %r8, 0x40(%r15)
movq %r9, %r14
andq %r13, %r9
movq 0x08(%rdi), %r8
xorq %r12, %r9
notq %r12
movq %r9, 0x48(%r15)
orq %r11, %r12
movq 0x38(%rdi), %r9
xorq %r10, %r12
movq %r12, 0x38(%r15)
andq %r10, %r11
movq 0xa0(%rdi), %r12
xorq %r14, %r11
movq %r11, 0x30(%r15)
orq %r10, %r14
movq 0x68(%rdi), %r10
xorq %r13, %r14
movq 0x98(%rdi), %r11
movq %r14, 0x28(%r15)
xorq %rbp, %r10
xorq %rax, %r11
rol $0x19, %r10
xorq %rdx, %r9
rol $0x8, %r11
xorq %rbx, %r12
rol $0x6, %r9
xorq %rcx, %r8
rol $0x12, %r12
movq %r10, %r13
andq %r11, %r10
rol $1, %r8
notq %r11
xorq %r9, %r10
movq %r10, 0x58(%r15)
movq %r12, %r14
andq %r11, %r12
movq 0x58(%rdi), %r10
xorq %r13, %r12
movq %r12, 0x60(%r15)
orq %r9, %r13
movq 0xb8(%rdi), %r12
xorq %r8, %r13
movq %r13, 0x50(%r15)
andq %r8, %r9
xorq %r14, %r9
movq %r9, 0x70(%r15)
orq %r8, %r14
movq 0x28(%rdi), %r9
xorq %r11, %r14
movq 0x88(%rdi), %r11
movq %r14, 0x68(%r15)
movq 0x20(%rdi), %r8
xorq %rcx, %r10
xorq %rdx, %r11
rol $0xa, %r10
xorq %rbx, %r9
rol $0xf, %r11
xorq %rbp, %r12
rol $0x24, %r9
xorq %rax, %r8
rol $0x38, %r12
movq %r10, %r13
orq %r11, %r10
rol $0x1b, %r8
notq %r11
xorq %r9, %r10
movq %r10, 0x80(%r15)
movq %r12, %r14
orq %r11, %r12
xorq %r13, %r12
movq %r12, 0x88(%r15)
andq %r9, %r13
xorq %r8, %r13
movq %r13, 0x78(%r15)
orq %r8, %r9
xorq %r14, %r9
movq %r9, 0x98(%r15)
andq %r14, %r8
xorq %r11, %r8
movq %r8, 0x90(%r15)
xorq 0x10(%rdi), %rdx
xorq 0x40(%rdi), %rbp
rol $0x3e, %rdx
xorq 0xa8(%rdi), %rcx
rol $0x37, %rbp
xorq 0x70(%rdi), %rax
rol $0x2, %rcx
xorq 0x78(%rdi), %rbx
xchg %r15, %rdi
rol $0x27, %rax
rol $0x29, %rbx
movq %rdx, %r13
andq %rbp, %rdx
notq %rbp
xorq %rcx, %rdx
movq %rdx, 0xc0(%rdi)
movq %rax, %r14
andq %rbp, %rax
xorq %r13, %rax
movq %rax, 0xa0(%rdi)
orq %rcx, %r13
xorq %rbx, %r13
movq %r13, 0xb8(%rdi)
andq %rbx, %rcx
xorq %r14, %rcx
movq %rcx, 0xb0(%rdi)
orq %r14, %rbx
xorq %rbp, %rbx
movq %rbx, 0xa8(%rdi)
movq %rdx, %rbp
movq %r13, %rdx
leaq 0x8(%rsi), %rsi
movq (%rdi), %r8
movq 0x30(%rdi), %r9
movq 0x60(%rdi), %r10
movq 0x90(%rdi), %r11
xorq 0x10(%rdi), %rcx
xorq 0x18(%rdi), %rdx
xorq %r8, %rax
xorq 0x08(%rdi), %rbx
xorq 0x38(%rdi), %rcx
xorq 0x28(%rdi), %rax
movq %rbp, %r12
xorq 0x20(%rdi), %rbp
xorq %r10, %rcx
xorq 0x50(%rdi), %rax
xorq 0x40(%rdi), %rdx
xorq %r9, %rbx
xorq 0x48(%rdi), %rbp
xorq 0x88(%rdi), %rcx
xorq 0x78(%rdi), %rax
xorq 0x68(%rdi), %rdx
xorq 0x58(%rdi), %rbx
xorq 0x70(%rdi), %rbp
movq %rcx, %r13
rol $1, %rcx
xorq %rax, %rcx
xorq %r11, %rdx
rol $1, %rax
xorq %rdx, %rax
xorq 0x80(%rdi), %rbx
rol $1, %rdx
xorq %rbx, %rdx
xorq 0x98(%rdi), %rbp
rol $1, %rbx
xorq %rbp, %rbx
rol $1, %rbp
xorq %r13, %rbp
xorq %rcx, %r9
xorq %rdx, %r10
rol $0x2c, %r9
xorq %rbp, %r11
xorq %rax, %r12
rol $0x2b, %r10
xorq %rbx, %r8
movq %r9, %r13
rol $0x15, %r11
orq %r10, %r9
xorq %r8, %r9
rol $0xe, %r12
xorq (%rsi), %r9
movq %r12, %r14
andq %r11, %r12
movq %r9, (%r15)
xorq %r10, %r12
notq %r10
movq %r12, 0x10(%r15)
orq %r11, %r10
movq 0xb0(%rdi), %r12
xorq %r13, %r10
movq %r10, 0x08(%r15)
andq %r8, %r13
movq 0x48(%rdi), %r9
xorq %r14, %r13
movq 0x50(%rdi), %r10
movq %r13, 0x20(%r15)
orq %r8, %r14
movq 0x18(%rdi), %r8
xorq %r11, %r14
movq 0x80(%rdi), %r11
movq %r14, 0x18(%r15)
xorq %rbp, %r8
xorq %rdx, %r12
rol $0x1c, %r8
xorq %rcx, %r11
xorq %rax, %r9
rol $0x3d, %r12
rol $0x2d, %r11
xorq %rbx, %r10
rol $0x14, %r9
movq %r8, %r13
orq %r12, %r8
rol $0x3, %r10
xorq %r11, %r8
movq %r8, 0x40(%r15)
movq %r9, %r14
andq %r13, %r9
movq 0x08(%rdi), %r8
xorq %r12, %r9
notq %r12
movq %r9, 0x48(%r15)
orq %r11, %r12
movq 0x38(%rdi), %r9
xorq %r10, %r12
movq %r12, 0x38(%r15)
andq %r10, %r11
movq 0xa0(%rdi), %r12
xorq %r14, %r11
movq %r11, 0x30(%r15)
orq %r10, %r14
movq 0x68(%rdi), %r10
xorq %r13, %r14
movq 0x98(%rdi), %r11
movq %r14, 0x28(%r15)
xorq %rbp, %r10
xorq %rax, %r11
rol $0x19, %r10
xorq %rdx, %r9
rol $0x8, %r11
xorq %rbx, %r12
rol $0x6, %r9
xorq %rcx, %r8
rol $0x12, %r12
movq %r10, %r13
andq %r11, %r10
rol $1, %r8
notq %r11
xorq %r9, %r10
movq %r10, 0x58(%r15)
movq %r12, %r14
andq %r11, %r12
movq 0x58(%rdi), %r10
xorq %r13, %r12
movq %r12, 0x60(%r15)
orq %r9, %r13
movq 0xb8(%rdi), %r12
xorq %r8, %r13
movq %r13, 0x50(%r15)
andq %r8, %r9
xorq %r14, %r9
movq %r9, 0x70(%r15)
orq %r8, %r14
movq 0x28(%rdi), %r9
xorq %r11, %r14
movq 0x88(%rdi), %r11
movq %r14, 0x68(%r15)
movq 0x20(%rdi), %r8
xorq %rcx, %r10
xorq %rdx, %r11
rol $0xa, %r10
xorq %rbx, %r9
rol $0xf, %r11
xorq %rbp, %r12
rol $0x24, %r9
xorq %rax, %r8
rol $0x38, %r12
movq %r10, %r13
orq %r11, %r10
rol $0x1b, %r8
notq %r11
xorq %r9, %r10
movq %r10, 0x80(%r15)
movq %r12, %r14
orq %r11, %r12
xorq %r13, %r12
movq %r12, 0x88(%r15)
andq %r9, %r13
xorq %r8, %r13
movq %r13, 0x78(%r15)
orq %r8, %r9
xorq %r14, %r9
movq %r9, 0x98(%r15)
andq %r14, %r8
xorq %r11, %r8
movq %r8, 0x90(%r15)
xorq 0x10(%rdi), %rdx
xorq 0x40(%rdi), %rbp
rol $0x3e, %rdx
xorq 0xa8(%rdi), %rcx
rol $0x37, %rbp
xorq 0x70(%rdi), %rax
rol $0x2, %rcx
xorq 0x78(%rdi), %rbx
xchg %r15, %rdi
rol $0x27, %rax
rol $0x29, %rbx
movq %rdx, %r13
andq %rbp, %rdx
notq %rbp
xorq %rcx, %rdx
movq %rdx, 0xc0(%rdi)
movq %rax, %r14
andq %rbp, %rax
xorq %r13, %rax
movq %rax, 0xa0(%rdi)
orq %rcx, %r13
xorq %rbx, %r13
movq %r13, 0xb8(%rdi)
andq %rbx, %rcx
xorq %r14, %rcx
movq %rcx, 0xb0(%rdi)
orq %r14, %rbx
xorq %rbp, %rbx
movq %rbx, 0xa8(%rdi)
movq %rdx, %rbp
movq %r13, %rdx
leaq 0x8(%rsi), %rsi
movq 0xc8(%rsp), %r8
add $0x2, %r8
cmp $24, %r8
jne Lsha3_keccak_f1600_loop
leaq -0xc0(%rsi), %rsi
notq 0x08(%rdi)
notq 0x10(%rdi)
notq 0x40(%rdi)
notq 0x60(%rdi)
notq 0x88(%rdi)
notq 0xa0(%rdi)
CFI_INC_RSP(208)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(sha3_keccak_f1600)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_cmul_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_521, z := (c * x) mod p_521, assuming
// x reduced
// Inputs c, x[9]; output z[9]
//
// extern void bignum_cmul_p521(uint64_t z[static 9], uint64_t c,
// const uint64_t x[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p521)
.text
#define z %rdi
// Temporarily moved here for initial multiply
#define x %rcx
// Likewise this is thrown away after initial multiply
#define c %rdx
#define cshort %edx
#define a %rax
#define dd %rax
// Digits: last one aliased to the local x pointer that's no longer needed
#define d0 %rsi
#define d1 %r8
#define d2 %r9
#define d3 %r10
#define d4 %r11
#define d5 %rbx
#define d6 %rbp
#define d7 %r12
#define d8 %r13
#define d9 %rcx
// Same as d9
#define h d9
S2N_BN_SYMBOL(bignum_cmul_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save additional registers to use
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
// Shuffle inputs (since we want the multiplier in %rdx)
movq %rdx, x
movq %rsi, c
// Multiply as [d9; ...; d0] = c * x.
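// (Editorial note: BMI2 mulxq multiplies the implicit %rdx by its first
// operand, writes the low half to its second operand and the high half to
// its third, and does not touch the flags, so the adcq chain below runs
// uninterrupted.)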
mulxq (x), d0, d1
mulxq 8(x), a, d2
addq a, d1
mulxq 16(x), a, d3
adcq a, d2
mulxq 24(x), a, d4
adcq a, d3
mulxq 32(x), a, d5
adcq a, d4
mulxq 40(x), a, d6
adcq a, d5
mulxq 48(x), a, d7
adcq a, d6
mulxq 56(x), a, d8
adcq a, d7
mulxq 64(x), a, d9
adcq a, d8
adcq $0, d9
// Create an AND "dd" of digits d7,...,d1, a computation we hope will
// get nicely interleaved with the multiplication chain above.
// From the point of view of architectural dependencies we have to
// bunch it up here since AND destroys the flags and we overwrite the
// register used as a stage temporary variable for the multiplications.
movq d1, dd
andq d2, dd
andq d3, dd
andq d4, dd
andq d5, dd
andq d6, dd
andq d7, dd
// Extract the high part h==d9 and mask off the low part l = [d8;d7;...;d0]
// but stuff d8 with 1 bits at the left to ease a comparison below
shldq $55, d8, h
orq $~0x1FF, d8
// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits d7,...,d1 are all 1s, we use the AND of them "dd" to
// condense the carry chain, and since we stuffed 1 bits into d8 we get
// the result in CF without an additional comparison. Hereafter we use c = 0.
// Since x was assumed reduced, h cannot be maximal, so the "lea" is safe,
// i.e. does not carry or wrap round.
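// (For example, if any of d1,...,d7 has a zero bit then dd < 2^64 - 1, the
// "adcq c, dd" step cannot pass a carry on, and CF correctly ends up
// clear, reporting h + l + 1 < 2^521; editorial example.)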
leaq 1(h), c
addq d0, c
movl $0, cshort
adcq c, dd
movq d8, a
adcq c, a
// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// This masking also gets rid of the stuffing with 1s we did above.
// Write back the digits as they are generated.
adcq h, d0
movq d0, (z)
adcq c, d1
movq d1, 8(z)
adcq c, d2
movq d2, 16(z)
adcq c, d3
movq d3, 24(z)
adcq c, d4
movq d4, 32(z)
adcq c, d5
movq d5, 40(z)
adcq c, d6
movq d6, 48(z)
adcq c, d7
movq d7, 56(z)
adcq c, d8
andq $0x1FF, d8
movq d8, 64(z)
// Restore registers and return
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/p521_jmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jmixadd_alt(uint64_t p3[static 27],
// const uint64_t p1[static 27],
// const uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity. It is assumed that
// all the coordinates of the input points p1 and p2 are fully reduced
// mod p_521, that the z coordinate of p1 is nonzero and that neither
// p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents the same affine
// point as".
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
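// For orientation, the temporaries below fit the textbook Jacobian mixed
// addition formulas (this mapping is our reading of the naming, with
// z2 = 1 implicit and all arithmetic mod p_521):
//
//   zp2  = z1^2                  x2a  = x2 * zp2        (= U2)
//   y2a  = y2 * z1 * zp2         (= S2)
//   xd   = x2a - x1 (= H)        yd   = y2a - y1        (= R)
//   zz   = xd^2                  ww   = yd^2
//   zzx1 = x1 * zz               zzx2 = x2a * zz
//   resx = ww - zzx1 - zzx2
//   resy = yd * (zzx1 - resx) - y1 * xd * zz
//   resz = z1 * xd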
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jmixadd_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jmixadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jmixadd_alt)
.text
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
// These are where they arrive except for input_y, initially in %rdx
#define input_z %rdi
#define input_x %rsi
#define input_y %rcx
// Pointer-offset pairs for inputs and outputs
#define x_1 0(input_x)
#define y_1 NUMSIZE(input_x)
#define z_1 (2*NUMSIZE)(input_x)
#define x_2 0(input_y)
#define y_2 NUMSIZE(input_y)
#define x_3 0(input_z)
#define y_3 NUMSIZE(input_z)
#define z_3 (2*NUMSIZE)(input_z)
// Pointer-offset pairs for temporaries, with some aliasing
// The tmp field is internal storage for field mul and sqr.
// NSPACE is the total stack needed for these temporaries
#define zp2 (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define tmp (NUMSIZE*6)(%rsp)
#define NSPACE NUMSIZE*7
// Corresponds exactly to bignum_mul_p521_alt except for the location of
// temporary storage (the stack tmp field is used)
#define mul_p521(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, 432(%rsp) ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq %r9, 440(%rsp) ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq %r10, 448(%rsp) ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq %r11, 456(%rsp) ; \
xorq %r14, %r14 ; \
movq P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x20+P1, %rax ; \
mulq P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq %r12, 464(%rsp) ; \
xorq %r15, %r15 ; \
movq P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x8+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x20+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x28+P1, %rax ; \
mulq P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq %r13, 472(%rsp) ; \
xorq %r8, %r8 ; \
movq P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x8+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x20+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x28+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x30+P1, %rax ; \
mulq P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq %r14, 480(%rsp) ; \
xorq %r9, %r9 ; \
movq P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq %r9, %r9 ; \
movq 0x8+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x10+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x20+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x28+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x30+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x38+P1, %rax ; \
mulq P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq %r15, 488(%rsp) ; \
xorq %r10, %r10 ; \
movq P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %r10, %r10 ; \
movq 0x8+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x10+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x28+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x30+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x38+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x40+P1, %rax ; \
mulq P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq %r8, 496(%rsp) ; \
xorq %r11, %r11 ; \
movq 0x8+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq 0x10+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x18+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x30+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x38+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x40+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x10+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x18+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x20+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x28+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x30+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x38+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x40+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq 0x18+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x20+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x28+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x30+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x38+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x40+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x20+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x28+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x30+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x38+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x40+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x28+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x30+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x38+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x40+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
xorq %r8, %r8 ; \
movq 0x30+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
imulq 0x40+P2, %rax ; \
addq %r8, %rax ; \
movq 496(%rsp), %r8 ; \
movq %r8, %rdx ; \
andq $0x1ff, %rdx ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rax, %r15 ; \
shrq $0x9, %rax ; \
addq %rax, %rdx ; \
stc; \
adcq 432(%rsp), %r8 ; \
adcq 440(%rsp), %r9 ; \
adcq 448(%rsp), %r10 ; \
adcq 456(%rsp), %r11 ; \
adcq 464(%rsp), %r12 ; \
adcq 472(%rsp), %r13 ; \
adcq 480(%rsp), %r14 ; \
adcq 488(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rdx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rdx ; \
andq $0x1ff, %rdx ; \
movq %rdx, 0x40+P0
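// The reduction tail above uses 2^512 == 2^-9 (mod p_521): the 1042-bit
// product 2^512*h + l is folded to s = rot9(h) + l < 2*p_521, after which
// at most one subtraction of p_521 is needed. A hedged C sketch of that
// last step (illustrative only; the function name is hypothetical, a branch
// replaces the constant-time stc/cmc/sbb idiom, and a 9-digit little-endian
// layout is assumed):
//
//   #include <stdint.h>
//   static void cond_sub_p521(uint64_t s[9])   // requires s < 2*p_521
//   {
//       unsigned __int128 t = (unsigned __int128)s[0] + 1;  // s + 1
//       uint64_t r[9];
//       r[0] = (uint64_t)t;
//       for (int i = 1; i < 9; i++) {
//           t = (unsigned __int128)s[i] + (uint64_t)(t >> 64);
//           r[i] = (uint64_t)t;
//       }
//       if (r[8] >> 9) {               // s + 1 >= 2^521 <=> s >= p_521
//           r[8] &= 0x1FF;             // s + 1 - 2^521 = s - p_521
//           for (int i = 0; i < 9; i++) s[i] = r[i];
//       }                              // otherwise keep s unchanged
//   }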
// Corresponds to bignum_sqr_p521_alt except that %rbp is used in place
// of %rcx and the stack tmp field is used as the temporary storage location
#define sqr_p521(P0,P1) \
movq P1, %rax ; \
mulq %rax; \
movq %rax, 432(%rsp) ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r11 ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq %r9, 440(%rsp) ; \
xorq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r12 ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq %r10, 448(%rsp) ; \
movq P1, %rax ; \
mulq 0x18+P1; \
xorq %r13, %r13 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r13 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r13, %r13 ; \
addq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq $0x0, %r13 ; \
movq %r11, 456(%rsp) ; \
movq P1, %rax ; \
mulq 0x20+P1; \
xorq %r14, %r14 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r14 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r14, %r14 ; \
addq %rbx, %r12 ; \
adcq %rbp, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq %r12, 464(%rsp) ; \
movq P1, %rax ; \
mulq 0x28+P1; \
xorq %r15, %r15 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r15 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r15, %r15 ; \
addq %rbx, %r13 ; \
adcq %rbp, %r14 ; \
adcq $0x0, %r15 ; \
movq %r13, 472(%rsp) ; \
movq P1, %rax ; \
mulq 0x30+P1; \
xorq %r8, %r8 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r8 ; \
movq 0x10+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r8 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r8, %r8 ; \
addq %rbx, %r14 ; \
adcq %rbp, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq %r14, 480(%rsp) ; \
movq P1, %rax ; \
mulq 0x38+P1; \
xorq %r9, %r9 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r9 ; \
movq 0x10+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r9 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r9 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r9, %r9 ; \
addq %rbx, %r15 ; \
adcq %rbp, %r8 ; \
adcq $0x0, %r9 ; \
movq %r15, 488(%rsp) ; \
movq P1, %rax ; \
mulq 0x40+P1; \
xorq %r10, %r10 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r10 ; \
movq 0x10+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r10 ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r10 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r10, %r10 ; \
addq %rbx, %r8 ; \
adcq %rbp, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P1, %rax ; \
mulq %rax; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq %r8, 496(%rsp) ; \
movq 0x8+P1, %rax ; \
mulq 0x40+P1; \
xorq %r11, %r11 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x10+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r11 ; \
movq 0x18+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r11 ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r11 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r11, %r11 ; \
addq %rbx, %r9 ; \
adcq %rbp, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x10+P1, %rax ; \
mulq 0x40+P1; \
xorq %r12, %r12 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x18+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r12 ; \
movq 0x20+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r12 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r12, %r12 ; \
addq %rbx, %r10 ; \
adcq %rbp, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x28+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x18+P1, %rax ; \
mulq 0x40+P1; \
xorq %r13, %r13 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x20+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r13 ; \
movq 0x28+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r13 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r13, %r13 ; \
addq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x20+P1, %rax ; \
mulq 0x40+P1; \
xorq %r14, %r14 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x28+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r14 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r14, %r14 ; \
addq %rbx, %r12 ; \
adcq %rbp, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x30+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x28+P1, %rax ; \
mulq 0x40+P1; \
xorq %r15, %r15 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x30+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r15 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r15, %r15 ; \
addq %rbx, %r13 ; \
adcq %rbp, %r14 ; \
adcq $0x0, %r15 ; \
xorq %r8, %r8 ; \
movq 0x38+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x30+P1, %rax ; \
mulq 0x40+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r8 ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x40+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
imulq %rax, %rax ; \
addq %r8, %rax ; \
movq 496(%rsp), %r8 ; \
movq %r8, %rdx ; \
andq $0x1ff, %rdx ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rax, %r15 ; \
shrq $0x9, %rax ; \
addq %rax, %rdx ; \
stc; \
adcq 432(%rsp), %r8 ; \
adcq 440(%rsp), %r9 ; \
adcq 448(%rsp), %r10 ; \
adcq 456(%rsp), %r11 ; \
adcq 464(%rsp), %r12 ; \
adcq 472(%rsp), %r13 ; \
adcq 480(%rsp), %r14 ; \
adcq 488(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rdx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rdx ; \
andq $0x1ff, %rdx ; \
        movq    %rdx, 0x40+P0
// Corresponds exactly to bignum_sub_p521
#define sub_p521(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
movq 0x30+P1, %r12 ; \
sbbq 0x30+P2, %r12 ; \
movq 0x38+P1, %r13 ; \
sbbq 0x38+P2, %r13 ; \
movq 0x40+P1, %r14 ; \
sbbq 0x40+P2, %r14 ; \
sbbq $0x0, %rax ; \
movq %rax, P0 ; \
sbbq $0x0, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq $0x0, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x30+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x38+P0 ; \
sbbq $0x0, %r14 ; \
andq $0x1ff, %r14 ; \
movq %r14, 0x40+P0
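// In C terms the subtraction works as follows (a minimal sketch assuming
// reduced 9-digit little-endian inputs; the name is hypothetical and the
// real code above is branch-free):
//
//   #include <stdint.h>
//   static void sub_mod_p521(uint64_t z[9], const uint64_t x[9],
//                            const uint64_t y[9])
//   {
//       uint64_t borrow = 0;
//       for (int i = 0; i < 9; i++) {
//           unsigned __int128 t =
//               (unsigned __int128)x[i] - y[i] - borrow;
//           z[i] = (uint64_t)t;
//           borrow = (uint64_t)(t >> 64) & 1;    // 1 if a borrow occurred
//       }
//       // If x < y, subtract the borrow from every digit and truncate to
//       // 521 bits: -1 mod 2^521 adds 2^521 - 1 = p_521, fixing the sign.
//       for (int i = 0; i < 9; i++) {
//           unsigned __int128 t = (unsigned __int128)z[i] - borrow;
//           z[i] = (uint64_t)t;
//           borrow = (uint64_t)(t >> 64) & 1;
//       }
//       z[8] &= 0x1FF;
//   }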
// Additional macros to help with final multiplexing
#define testzero9(P) \
movq P, %rax ; \
movq 8+P, %rbx ; \
movq 16+P, %rdx ; \
movq 24+P, %rbp ; \
orq 32+P, %rax ; \
orq 40+P, %rbx ; \
orq 48+P, %rdx ; \
orq 56+P, %rbp ; \
orq %rbx, %rax ; \
orq %rbp, %rdx ; \
orq 64+P, %rax ; \
orq %rdx, %rax
#define mux9(P0,PNE,PEQ) \
movq PNE, %rax ; \
movq PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, P0 ; \
movq 8+PNE, %rax ; \
movq 8+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 8+P0 ; \
movq 16+PNE, %rax ; \
movq 16+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 16+P0 ; \
movq 24+PNE, %rax ; \
movq 24+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 24+P0 ; \
movq 32+PNE, %rax ; \
movq 32+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 32+P0 ; \
movq 40+PNE, %rax ; \
movq 40+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 40+P0 ; \
movq 48+PNE, %rax ; \
movq 48+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 48+P0 ; \
movq 56+PNE, %rax ; \
movq 56+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 56+P0 ; \
movq 64+PNE, %rax ; \
movq 64+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 64+P0
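// A C analogue of this 9-word multiplexer, using a mask instead of cmov
// (purely illustrative; the name is hypothetical and the zflag parameter
// stands in for the ZF left behind by testzero9):
//
//   #include <stdint.h>
//   static void mux9_c(uint64_t z[9], const uint64_t pne[9],
//                      const uint64_t peq[9], int zflag)
//   {
//       uint64_t m = zflag ? ~(uint64_t)0 : 0;   // all-1s selects peq
//       for (int i = 0; i < 9; i++)
//           z[i] = (pne[i] & ~m) | (peq[i] & m);
//   }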
#define mux9c(P0,PNE) \
movq PNE, %rax ; \
movl $1, %ebx ; \
cmovzq %rbx, %rax ; \
movq %rax, P0 ; \
movq 8+PNE, %rax ; \
movl $0, %ebx ; \
cmovzq %rbx, %rax ; \
movq %rax, 8+P0 ; \
movq 16+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 16+P0 ; \
movq 24+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 24+P0 ; \
movq 32+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 32+P0 ; \
movq 40+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 40+P0 ; \
movq 48+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 48+P0 ; \
movq 56+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 56+P0 ; \
movq 64+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 64+P0
#define copy9(P0,P1) \
movq P1, %rax ; \
movq %rax, P0 ; \
movq 8+P1, %rax ; \
movq %rax, 8+P0 ; \
movq 16+P1, %rax ; \
movq %rax, 16+P0 ; \
movq 24+P1, %rax ; \
movq %rax, 24+P0 ; \
movq 32+P1, %rax ; \
movq %rax, 32+P0 ; \
movq 40+P1, %rax ; \
movq %rax, 40+P0 ; \
movq 48+P1, %rax ; \
movq %rax, 48+P0 ; \
movq 56+P1, %rax ; \
movq %rax, 56+P0 ; \
movq 64+P1, %rax ; \
movq %rax, 64+P0
S2N_BN_SYMBOL(p521_jmixadd_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Move the input arguments to stable places (two are already there)
movq %rdx, input_y
// Main code, just a sequence of basic field operations
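// Read as field-arithmetic pseudocode, the sequence below computes the
// standard mixed-addition formulas (x3,y3,z3 are the output coordinates):
//
//   zp2 = z1^2            y2a = y2 * z1^3        x2a = x2 * z1^2
//   xd  = x2a - x1        yd  = y2a - y1
//   zz  = xd^2            ww  = yd^2
//   zzx1 = zz * x1        zzx2 = zz * x2a
//   x3  = ww - zzx1 - zzx2
//   z3  = xd * z1
//   y3  = yd * (zzx1 - x3) - y1 * (zzx2 - zzx1)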
sqr_p521(zp2,z_1)
mul_p521(y2a,z_1,y_2)
mul_p521(x2a,zp2,x_2)
mul_p521(y2a,zp2,y2a)
sub_p521(xd,x2a,x_1)
sub_p521(yd,y2a,y_1)
sqr_p521(zz,xd)
sqr_p521(ww,yd)
mul_p521(zzx1,zz,x_1)
mul_p521(zzx2,zz,x2a)
sub_p521(resx,ww,zzx1)
sub_p521(t1,zzx2,zzx1)
mul_p521(resz,xd,z_1)
sub_p521(resx,resx,zzx2)
sub_p521(t2,zzx1,resx)
mul_p521(t1,t1,y_1)
mul_p521(t2,yd,t2)
sub_p521(resy,t2,t1)
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
testzero9(z_1)
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with an extra z = 1
// coordinate, hence giving 0 + p2 = p2 for the final result.
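// Note that testzero9 leaves ZF set exactly when all nine digits are zero,
// and mux9/mux9c/copy9 use only mov/cmov, which do not modify the flags,
// so the single test drives every cmovz below.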
mux9 (resx,resx,x_2)
mux9 (resy,resy,y_2)
copy9(x_3,resx)
copy9(y_3,resy)
mux9c(z_3,resz)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p521_jmixadd_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/p521_jdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jdouble(uint64_t p3[static 27], const uint64_t p1[static 27]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input point are fully
// reduced mod p_521 and that the z coordinate is not zero.
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jdouble)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jdouble)
.text
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
// This is where they arrive anyway, and they stay there.
#define input_z %rdi
#define input_x %rsi
// Pointer-offset pairs for inputs and outputs
#define x_1 0(input_x)
#define y_1 NUMSIZE(input_x)
#define z_1 (2*NUMSIZE)(input_x)
#define x_3 0(input_z)
#define y_3 NUMSIZE(input_z)
#define z_3 (2*NUMSIZE)(input_z)
// Pointer-offset pairs for temporaries, with some aliasing
// The tmp field is internal storage for field mul and sqr.
// NSPACE is the total stack needed for these temporaries
#define z2 (NUMSIZE*0)(%rsp)
#define y2 (NUMSIZE*1)(%rsp)
#define x2p (NUMSIZE*2)(%rsp)
#define xy2 (NUMSIZE*3)(%rsp)
#define y4 (NUMSIZE*4)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define dx2 (NUMSIZE*5)(%rsp)
#define t1 (NUMSIZE*5)(%rsp)
#define d (NUMSIZE*6)(%rsp)
#define x4p (NUMSIZE*6)(%rsp)
#define tmp (NUMSIZE*7)(%rsp)
#define NSPACE 568
// Corresponds exactly to bignum_mul_p521
#define mul_p521(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
movq %r8, 504(%rsp) ; \
mulxq 0x8+P1, %rbx, %r10 ; \
adcq %rbx, %r9 ; \
mulxq 0x10+P1, %rbx, %r11 ; \
adcq %rbx, %r10 ; \
mulxq 0x18+P1, %rbx, %r12 ; \
adcq %rbx, %r11 ; \
mulxq 0x20+P1, %rbx, %r13 ; \
adcq %rbx, %r12 ; \
mulxq 0x28+P1, %rbx, %r14 ; \
adcq %rbx, %r13 ; \
mulxq 0x30+P1, %rbx, %r15 ; \
adcq %rbx, %r14 ; \
mulxq 0x38+P1, %rbx, %r8 ; \
adcq %rbx, %r15 ; \
adcq %rcx, %r8 ; \
movq 0x8+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
movq %r9, 512(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x38+P1, %rax, %r9 ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
adcq %rcx, %r9 ; \
movq 0x10+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
movq %r10, 520(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x38+P1, %rax, %r10 ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
adcq %rcx, %r10 ; \
movq 0x18+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq %r11, 528(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x38+P1, %rax, %r11 ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
adcq %rcx, %r11 ; \
movq 0x20+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq %r12, 536(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x38+P1, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
adcq %rcx, %r12 ; \
movq 0x28+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
movq %r13, 544(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x38+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcq %rcx, %r13 ; \
movq 0x30+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
movq %r14, 552(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x38+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcq %rcx, %r14 ; \
movq 0x38+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
movq %r15, 560(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x38+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcq %rcx, %r15 ; \
movq 0x40+P1, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P2, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P2, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P2, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P2, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P2, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P2, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P2, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P2, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rcx, %rbx ; \
adcq %rbx, %rcx ; \
movq 0x40+P2, %rdx ; \
xorl %eax, %eax ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %rcx ; \
mulxq 0x40+P1, %rax, %rbx ; \
adcq %rax, %rcx ; \
movq %r8, %rax ; \
andq $0x1ff, %rax ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rcx, %r15 ; \
shrq $0x9, %rcx ; \
addq %rax, %rcx ; \
stc; \
adcq 504(%rsp), %r8 ; \
adcq 512(%rsp), %r9 ; \
adcq 520(%rsp), %r10 ; \
adcq 528(%rsp), %r11 ; \
adcq 536(%rsp), %r12 ; \
adcq 544(%rsp), %r13 ; \
adcq 552(%rsp), %r14 ; \
adcq 560(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rcx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rcx ; \
andq $0x1ff, %rcx ; \
movq %rcx, 0x40+P0
// Corresponds exactly to bignum_sqr_p521
#define sqr_p521(P0,P1) \
xorl %ecx, %ecx ; \
movq P1, %rdx ; \
mulxq 0x8+P1, %r9, %rax ; \
movq %r9, 512(%rsp) ; \
mulxq 0x10+P1, %r10, %rbx ; \
adcxq %rax, %r10 ; \
movq %r10, 520(%rsp) ; \
mulxq 0x18+P1, %r11, %rax ; \
adcxq %rbx, %r11 ; \
mulxq 0x20+P1, %r12, %rbx ; \
adcxq %rax, %r12 ; \
mulxq 0x28+P1, %r13, %rax ; \
adcxq %rbx, %r13 ; \
mulxq 0x30+P1, %r14, %rbx ; \
adcxq %rax, %r14 ; \
mulxq 0x38+P1, %r15, %r8 ; \
adcxq %rbx, %r15 ; \
adcxq %rcx, %r8 ; \
xorl %ecx, %ecx ; \
movq 0x8+P1, %rdx ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq %r11, 528(%rsp) ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq %r12, 536(%rsp) ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x38+P1, %rax, %r9 ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
movq 0x20+P1, %rdx ; \
mulxq 0x28+P1, %rax, %r10 ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
adcxq %rcx, %r10 ; \
xorl %ecx, %ecx ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
movq %r13, 544(%rsp) ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
movq %r14, 552(%rsp) ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
movq 0x30+P1, %rdx ; \
mulxq 0x20+P1, %rax, %r11 ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 0x28+P1, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
adcxq %rcx, %r12 ; \
xorl %ecx, %ecx ; \
movq 0x18+P1, %rdx ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
movq %r15, 560(%rsp) ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
movq 0x38+P1, %rdx ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x28+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
mulxq 0x30+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcxq %rcx, %r14 ; \
xorl %ecx, %ecx ; \
movq P1, %rdx ; \
mulxq %rdx, %rax, %rbx ; \
movq %rax, 504(%rsp) ; \
movq 512(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 512(%rsp) ; \
movq 520(%rsp), %rax ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %rax, %rax ; \
adoxq %rdx, %rax ; \
movq %rax, 520(%rsp) ; \
movq 528(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 528(%rsp) ; \
movq 536(%rsp), %rax ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %rax, %rax ; \
adoxq %rdx, %rax ; \
movq %rax, 536(%rsp) ; \
movq 544(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 544(%rsp) ; \
movq 552(%rsp), %rax ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %rax, %rax ; \
adoxq %rdx, %rax ; \
movq %rax, 552(%rsp) ; \
movq 560(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 560(%rsp) ; \
movq 0x20+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %r8, %r8 ; \
adoxq %rdx, %r8 ; \
adcxq %r9, %r9 ; \
adoxq %rbx, %r9 ; \
movq 0x28+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %r10, %r10 ; \
adoxq %rdx, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rbx, %r11 ; \
movq 0x30+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %r12, %r12 ; \
adoxq %rdx, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rbx, %r13 ; \
movq 0x38+P1, %rdx ; \
mulxq %rdx, %rdx, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rdx, %r14 ; \
adcxq %rcx, %r15 ; \
adoxq %rcx, %r15 ; \
movq 0x40+P1, %rdx ; \
movq %rdx, %rcx ; \
imulq %rcx, %rcx ; \
addq %rdx, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %rcx ; \
adcq $0x0, %rcx ; \
movq %r8, %rax ; \
andq $0x1ff, %rax ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rcx, %r15 ; \
shrq $0x9, %rcx ; \
addq %rax, %rcx ; \
stc; \
adcq 504(%rsp), %r8 ; \
adcq 512(%rsp), %r9 ; \
adcq 520(%rsp), %r10 ; \
adcq 528(%rsp), %r11 ; \
adcq 536(%rsp), %r12 ; \
adcq 544(%rsp), %r13 ; \
adcq 552(%rsp), %r14 ; \
adcq 560(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rcx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rcx ; \
andq $0x1ff, %rcx ; \
movq %rcx, 0x40+P0
// Corresponds exactly to bignum_add_p521
#define add_p521(P0,P1,P2) \
stc; \
movq P1, %rax ; \
adcq P2, %rax ; \
movq 0x8+P1, %rbx ; \
adcq 0x8+P2, %rbx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
adcq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
adcq 0x28+P2, %r11 ; \
movq 0x30+P1, %r12 ; \
adcq 0x30+P2, %r12 ; \
movq 0x38+P1, %r13 ; \
adcq 0x38+P2, %r13 ; \
movq 0x40+P1, %r14 ; \
adcq 0x40+P2, %r14 ; \
movq $0x200, %rdx ; \
andq %r14, %rdx ; \
cmpq $0x200, %rdx ; \
sbbq $0x0, %rax ; \
movq %rax, P0 ; \
sbbq $0x0, %rbx ; \
movq %rbx, 0x8+P0 ; \
sbbq $0x0, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x30+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x38+P0 ; \
sbbq %rdx, %r14 ; \
movq %r14, 0x40+P0
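// The trick above: force a carry-in so the chain computes s = x + y + 1,
// then inspect bit 521 (the 0x200 bit of the top word). If it is set,
// s - 2^521 = x + y - p_521 is the reduced result, so just clear that bit;
// otherwise subtract the 1 back out. A hedged C sketch (the name is
// hypothetical, and it is branchy where the assembly is constant-time):
//
//   #include <stdint.h>
//   static void add_mod_p521(uint64_t z[9], const uint64_t x[9],
//                            const uint64_t y[9])
//   {
//       unsigned __int128 t = 1;                  // forced carry-in
//       for (int i = 0; i < 9; i++) {
//           t += (unsigned __int128)x[i] + y[i];
//           z[i] = (uint64_t)t;
//           t >>= 64;
//       }
//       if (z[8] & 0x200)                         // x + y + 1 >= 2^521
//           z[8] &= 0x1FF;                        // == x + y - p_521
//       else {                                    // undo the +1
//           uint64_t b = 1;
//           for (int i = 0; i < 9; i++) {
//               uint64_t o = z[i]; z[i] = o - b; b = (o < b);
//           }
//       }
//   }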
// Corresponds exactly to bignum_sub_p521
#define sub_p521(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
movq 0x30+P1, %r12 ; \
sbbq 0x30+P2, %r12 ; \
movq 0x38+P1, %r13 ; \
sbbq 0x38+P2, %r13 ; \
movq 0x40+P1, %r14 ; \
sbbq 0x40+P2, %r14 ; \
sbbq $0x0, %rax ; \
movq %rax, P0 ; \
sbbq $0x0, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq $0x0, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x30+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x38+P0 ; \
sbbq $0x0, %r14 ; \
andq $0x1ff, %r14 ; \
movq %r14, 0x40+P0
// Weak multiplication, not fully reducing modulo p_521: the result is
// congruent to the true product but may exceed p_521
#define weakmul_p521(P0,P1,P2) \
xorl %ecx, %ecx ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
movq %r8, 504(%rsp) ; \
mulxq 0x8+P1, %rbx, %r10 ; \
adcq %rbx, %r9 ; \
mulxq 0x10+P1, %rbx, %r11 ; \
adcq %rbx, %r10 ; \
mulxq 0x18+P1, %rbx, %r12 ; \
adcq %rbx, %r11 ; \
mulxq 0x20+P1, %rbx, %r13 ; \
adcq %rbx, %r12 ; \
mulxq 0x28+P1, %rbx, %r14 ; \
adcq %rbx, %r13 ; \
mulxq 0x30+P1, %rbx, %r15 ; \
adcq %rbx, %r14 ; \
mulxq 0x38+P1, %rbx, %r8 ; \
adcq %rbx, %r15 ; \
adcq %rcx, %r8 ; \
movq 0x8+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
movq %r9, 512(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x38+P1, %rax, %r9 ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
adcq %rcx, %r9 ; \
movq 0x10+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
movq %r10, 520(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x38+P1, %rax, %r10 ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
adcq %rcx, %r10 ; \
movq 0x18+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq %r11, 528(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x38+P1, %rax, %r11 ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
adcq %rcx, %r11 ; \
movq 0x20+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq %r12, 536(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x38+P1, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
adcq %rcx, %r12 ; \
movq 0x28+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
movq %r13, 544(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x38+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
adcq %rcx, %r13 ; \
movq 0x30+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
movq %r14, 552(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x38+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
adcq %rcx, %r14 ; \
movq 0x38+P2, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
movq %r15, 560(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x38+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
adcq %rcx, %r15 ; \
movq 0x40+P1, %rdx ; \
xorl %ecx, %ecx ; \
mulxq P2, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P2, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P2, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P2, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P2, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P2, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P2, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P2, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rcx, %rbx ; \
adcq %rbx, %rcx ; \
movq 0x40+P2, %rdx ; \
xorl %eax, %eax ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %rcx ; \
mulxq 0x40+P1, %rax, %rbx ; \
adcq %rax, %rcx ; \
movq %r8, %rax ; \
andq $0x1ff, %rax ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rcx, %r15 ; \
shrq $0x9, %rcx ; \
addq %rax, %rcx ; \
addq 504(%rsp), %r8 ; \
movq %r8, P0 ; \
adcq 512(%rsp), %r9 ; \
movq %r9, 0x8+P0 ; \
adcq 520(%rsp), %r10 ; \
movq %r10, 0x10+P0 ; \
adcq 528(%rsp), %r11 ; \
movq %r11, 0x18+P0 ; \
adcq 536(%rsp), %r12 ; \
movq %r12, 0x20+P0 ; \
adcq 544(%rsp), %r13 ; \
movq %r13, 0x28+P0 ; \
adcq 552(%rsp), %r14 ; \
movq %r14, 0x30+P0 ; \
adcq 560(%rsp), %r15 ; \
movq %r15, 0x38+P0 ; \
adcq $0, %rcx ; \
movq %rcx, 0x40+P0
// P0 = C * P1 - D * P2 == C * P1 + D * (p_521 - P2)
#define cmsub_p521(P0,C,P1,D,P2) \
movq $D, %rdx ; \
movq 64+P2, %rbx ; \
xorq $0x1FF, %rbx ; \
movq P2, %rax ; \
notq %rax; \
mulxq %rax, %r8, %r9 ; \
movq 8+P2, %rax ; \
notq %rax; \
mulxq %rax, %rax, %r10 ; \
addq %rax, %r9 ; \
movq 16+P2, %rax ; \
notq %rax; \
mulxq %rax, %rax, %r11 ; \
adcq %rax, %r10 ; \
movq 24+P2, %rax ; \
notq %rax; \
mulxq %rax, %rax, %r12 ; \
adcq %rax, %r11 ; \
movq 32+P2, %rax ; \
notq %rax; \
mulxq %rax, %rax, %r13 ; \
adcq %rax, %r12 ; \
movq 40+P2, %rax ; \
notq %rax; \
mulxq %rax, %rax, %r14 ; \
adcq %rax, %r13 ; \
movq 48+P2, %rax ; \
notq %rax; \
mulxq %rax, %rax, %r15 ; \
adcq %rax, %r14 ; \
movq 56+P2, %rax ; \
notq %rax; \
mulxq %rax, %rax, %rcx ; \
adcq %rax, %r15 ; \
mulxq %rbx, %rbx, %rax ; \
adcq %rcx, %rbx ; \
xorl %eax, %eax ; \
movq $C, %rdx ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq 8+P1, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq 16+P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 24+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
mulxq 32+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
mulxq 40+P1, %rax, %rcx ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
mulxq 48+P1, %rax, %rcx ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
mulxq 56+P1, %rax, %rcx ; \
adcxq %rax, %r15 ; \
adoxq %rcx, %rbx ; \
mulxq 64+P1, %rax, %rcx ; \
adcxq %rax, %rbx ; \
movq %r9, %rax ; \
andq %r10, %rax ; \
andq %r11, %rax ; \
andq %r12, %rax ; \
andq %r13, %rax ; \
andq %r14, %rax ; \
andq %r15, %rax ; \
movq %rbx, %rdx ; \
shrq $9, %rdx ; \
orq $~0x1FF, %rbx ; \
leaq 1(%rdx), %rcx ; \
addq %r8, %rcx ; \
movl $0, %ecx ; \
adcq %rcx, %rax ; \
movq %rbx, %rax ; \
adcq %rcx, %rax ; \
adcq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rcx, %r9 ; \
movq %r9, 8+P0 ; \
adcq %rcx, %r10 ; \
movq %r10, 16+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 24+P0 ; \
adcq %rcx, %r12 ; \
movq %r12, 32+P0 ; \
adcq %rcx, %r13 ; \
movq %r13, 40+P0 ; \
adcq %rcx, %r14 ; \
movq %r14, 48+P0 ; \
adcq %rcx, %r15 ; \
movq %r15, 56+P0 ; \
adcq %rcx, %rbx ; \
andq $0x1FF, %rbx ; \
movq %rbx, 64+P0
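// Note on the complement trick above: since p_521 = 2^521 - 1 is all 1s
// across 521 bits, p_521 - P2 is simply the 521-bit bitwise complement of
// P2, obtained with notq on the eight full digits and xorq $0x1FF on the
// 9-bit top digit.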
// P0 = 3 * P1 - 8 * P2 == 3 * P1 + 8 * (p_521 - P2)
#define cmsub38_p521(P0,P1,P2) \
movq 64+P2, %rbx ; \
xorq $0x1FF, %rbx ; \
movq 56+P2, %r15 ; \
notq %r15; \
shldq $3, %r15, %rbx ; \
movq 48+P2, %r14 ; \
notq %r14; \
shldq $3, %r14, %r15 ; \
movq 40+P2, %r13 ; \
notq %r13; \
shldq $3, %r13, %r14 ; \
movq 32+P2, %r12 ; \
notq %r12; \
shldq $3, %r12, %r13 ; \
movq 24+P2, %r11 ; \
notq %r11; \
shldq $3, %r11, %r12 ; \
movq 16+P2, %r10 ; \
notq %r10; \
shldq $3, %r10, %r11 ; \
movq 8+P2, %r9 ; \
notq %r9; \
shldq $3, %r9, %r10 ; \
movq P2, %r8 ; \
notq %r8; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
movq $3, %rdx ; \
xorl %eax, %eax ; \
mulxq P1, %rax, %rcx ; \
adcxq %rax, %r8 ; \
adoxq %rcx, %r9 ; \
mulxq 8+P1, %rax, %rcx ; \
adcxq %rax, %r9 ; \
adoxq %rcx, %r10 ; \
mulxq 16+P1, %rax, %rcx ; \
adcxq %rax, %r10 ; \
adoxq %rcx, %r11 ; \
mulxq 24+P1, %rax, %rcx ; \
adcxq %rax, %r11 ; \
adoxq %rcx, %r12 ; \
mulxq 32+P1, %rax, %rcx ; \
adcxq %rax, %r12 ; \
adoxq %rcx, %r13 ; \
mulxq 40+P1, %rax, %rcx ; \
adcxq %rax, %r13 ; \
adoxq %rcx, %r14 ; \
mulxq 48+P1, %rax, %rcx ; \
adcxq %rax, %r14 ; \
adoxq %rcx, %r15 ; \
mulxq 56+P1, %rax, %rcx ; \
adcxq %rax, %r15 ; \
adoxq %rcx, %rbx ; \
mulxq 64+P1, %rax, %rcx ; \
adcxq %rax, %rbx ; \
movq %r9, %rax ; \
andq %r10, %rax ; \
andq %r11, %rax ; \
andq %r12, %rax ; \
andq %r13, %rax ; \
andq %r14, %rax ; \
andq %r15, %rax ; \
movq %rbx, %rdx ; \
shrq $9, %rdx ; \
orq $~0x1FF, %rbx ; \
leaq 1(%rdx), %rcx ; \
addq %r8, %rcx ; \
movl $0, %ecx ; \
adcq %rcx, %rax ; \
movq %rbx, %rax ; \
adcq %rcx, %rax ; \
adcq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rcx, %r9 ; \
movq %r9, 8+P0 ; \
adcq %rcx, %r10 ; \
movq %r10, 16+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 24+P0 ; \
adcq %rcx, %r12 ; \
movq %r12, 32+P0 ; \
adcq %rcx, %r13 ; \
movq %r13, 40+P0 ; \
adcq %rcx, %r14 ; \
movq %r14, 48+P0 ; \
adcq %rcx, %r15 ; \
movq %r15, 56+P0 ; \
adcq %rcx, %rbx ; \
andq $0x1FF, %rbx ; \
movq %rbx, 64+P0
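// Here 8 * (p_521 - P2) is built in one pass: each digit of P2 is
// complemented (the 521-bit complement is p_521 - P2, as above) and the
// shldq $3 chain shifts the complemented value left by three bits, i.e.
// multiplies it by 8, before the 3 * P1 term is accumulated on top.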
// P0 = 4 * P1 - P2 = 4 * P1 + (p_521 - P2)
#define cmsub41_p521(P0,P1,P2) \
movq 64+P1, %rbx ; \
movq 56+P1, %r15 ; \
shldq $2, %r15, %rbx ; \
movq 48+P1, %r14 ; \
shldq $2, %r14, %r15 ; \
movq 40+P1, %r13 ; \
shldq $2, %r13, %r14 ; \
movq 32+P1, %r12 ; \
shldq $2, %r12, %r13 ; \
movq 24+P1, %r11 ; \
shldq $2, %r11, %r12 ; \
movq 16+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
movq 64+P2, %rcx ; \
xorq $0x1FF, %rcx ; \
movq P2, %rax ; \
notq %rax; \
addq %rax, %r8 ; \
movq 8+P2, %rax ; \
notq %rax; \
adcq %rax, %r9 ; \
movq 16+P2, %rax ; \
notq %rax; \
adcq %rax, %r10 ; \
movq 24+P2, %rax ; \
notq %rax; \
adcq %rax, %r11 ; \
movq 32+P2, %rax ; \
notq %rax; \
adcq %rax, %r12 ; \
movq 40+P2, %rax ; \
notq %rax; \
adcq %rax, %r13 ; \
movq 48+P2, %rax ; \
notq %rax; \
adcq %rax, %r14 ; \
movq 56+P2, %rax ; \
notq %rax; \
adcq %rax, %r15 ; \
adcq %rcx, %rbx ; \
movq %r9, %rax ; \
andq %r10, %rax ; \
andq %r11, %rax ; \
andq %r12, %rax ; \
andq %r13, %rax ; \
andq %r14, %rax ; \
andq %r15, %rax ; \
movq %rbx, %rdx ; \
shrq $9, %rdx ; \
orq $~0x1FF, %rbx ; \
leaq 1(%rdx), %rcx ; \
addq %r8, %rcx ; \
movl $0, %ecx ; \
adcq %rcx, %rax ; \
movq %rbx, %rax ; \
adcq %rcx, %rax ; \
adcq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rcx, %r9 ; \
movq %r9, 8+P0 ; \
adcq %rcx, %r10 ; \
movq %r10, 16+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 24+P0 ; \
adcq %rcx, %r12 ; \
movq %r12, 32+P0 ; \
adcq %rcx, %r13 ; \
movq %r13, 40+P0 ; \
adcq %rcx, %r14 ; \
movq %r14, 48+P0 ; \
adcq %rcx, %r15 ; \
movq %r15, 56+P0 ; \
adcq %rcx, %rbx ; \
andq $0x1FF, %rbx ; \
movq %rbx, 64+P0
S2N_BN_SYMBOL(p521_jdouble):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Main code, just a sequence of basic field operations
// z2 = z^2
// y2 = y^2
sqr_p521(z2,z_1)
sqr_p521(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
add_p521(t1,x_1,z2)
sub_p521(t2,x_1,z2)
mul_p521(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p521(t1,y_1,z_1)
sqr_p521(x4p,x2p)
weakmul_p521(xy2,x_1,y2)
// t2 = (y + z)^2
sqr_p521(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p521(d,12,xy2,9,x4p)
sub_p521(t1,t2,z2)
// y4 = y^4
sqr_p521(y4,y2)
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p521(z_3,t1,y2)
weakmul_p521(dx2,d,x2p)
// x' = 4 * xy2 - d
cmsub41_p521(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_p521(y_3,dx2,y4)
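// Collected into pseudocode, the doubling above computes:
//
//   z2  = z1^2             y2  = y1^2
//   x2p = (x1 + z2) * (x1 - z2)             // = x1^2 - z1^4
//   xy2 = x1 * y2          x4p = x2p^2      y4 = y2^2
//   d   = 12 * xy2 - 9 * x4p
//   z3  = (y1 + z1)^2 - z2 - y2             // = 2 * y1 * z1
//   x3  = 4 * xy2 - d
//   y3  = 3 * (d * x2p) - 8 * y4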
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p521_jdouble)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_mul_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_mul_p521(uint64_t z[static 9], const uint64_t x[static 9],
// const uint64_t y[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521)
.text
#define z %rdi
#define x %rsi
// Copied in
#define y %rcx
// mulpadd (high,low,x) adds rdx * x to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rbx as temporaries.
#define mulpadd(high,low,x) \
mulxq x, %rax, %rbx ; \
adcxq %rax, low ; \
adoxq %rbx, high
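// For example, mulpadd(%r10,%r9,8(y)) expands to
//   mulxq   8(y), %rax, %rbx
//   adcxq   %rax, %r9
//   adoxq   %rbx, %r10
// so the adcx chain accumulates the low halves while the adox chain
// carries the high halves, letting CF and OF run as two parallel chains.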
S2N_BN_SYMBOL(bignum_mul_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save more registers to play with and make temporary space on stack
CFI_PUSH(%rbp)
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(64)
// Copy y into a safe register to start with
movq %rdx, y
// Clone of the main body of bignum_mul_8_16, writing back the low 8 words to
// the temporary buffer on the stack and keeping the top half in %r15,...,%r8
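// (Throughout, an xorl of a register with itself doubles as a flag reset:
// it clears both CF and OF, starting the adc/adcx/adox carry chains from
// a clean state.)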
xorl %ebp, %ebp
movq (y), %rdx
mulxq (x), %r8, %r9
movq %r8, (%rsp)
mulxq 0x8(x), %rbx, %r10
adcq %rbx, %r9
mulxq 0x10(x), %rbx, %r11
adcq %rbx, %r10
mulxq 0x18(x), %rbx, %r12
adcq %rbx, %r11
mulxq 0x20(x), %rbx, %r13
adcq %rbx, %r12
mulxq 0x28(x), %rbx, %r14
adcq %rbx, %r13
mulxq 0x30(x), %rbx, %r15
adcq %rbx, %r14
mulxq 0x38(x), %rbx, %r8
adcq %rbx, %r15
adcq %rbp, %r8
movq 0x8(y), %rdx
xorl %ebp, %ebp
mulxq (x), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
movq %r9, 0x8(%rsp)
mulxq 0x8(x), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x10(x), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x18(x), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x20(x), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x28(x), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x30(x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x38(x), %rax, %r9
adcxq %rax, %r8
adoxq %rbp, %r9
adcq %rbp, %r9
movq 0x10(y), %rdx
xorl %ebp, %ebp
mulxq (x), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
movq %r10, 0x10(%rsp)
mulxq 0x8(x), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x10(x), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x18(x), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x20(x), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x28(x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x30(x), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x38(x), %rax, %r10
adcxq %rax, %r9
adoxq %rbp, %r10
adcq %rbp, %r10
movq 0x18(y), %rdx
xorl %ebp, %ebp
mulxq (x), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
movq %r11, 0x18(%rsp)
mulxq 0x8(x), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x10(x), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x18(x), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x20(x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x28(x), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x30(x), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x38(x), %rax, %r11
adcxq %rax, %r10
adoxq %rbp, %r11
adcq %rbp, %r11
movq 0x20(y), %rdx
xorl %ebp, %ebp
mulxq (x), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
movq %r12, 0x20(%rsp)
mulxq 0x8(x), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x10(x), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x18(x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x20(x), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x28(x), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x30(x), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x38(x), %rax, %r12
adcxq %rax, %r11
adoxq %rbp, %r12
adcq %rbp, %r12
movq 0x28(y), %rdx
xorl %ebp, %ebp
mulxq (x), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
movq %r13, 0x28(%rsp)
mulxq 0x8(x), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
mulxq 0x10(x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x18(x), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x20(x), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x28(x), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x30(x), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x38(x), %rax, %r13
adcxq %rax, %r12
adoxq %rbp, %r13
adcq %rbp, %r13
movq 0x30(y), %rdx
xorl %ebp, %ebp
mulxq (x), %rax, %rbx
adcxq %rax, %r14
adoxq %rbx, %r15
movq %r14, 0x30(%rsp)
mulxq 0x8(x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
mulxq 0x10(x), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x18(x), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x20(x), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x28(x), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x30(x), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x38(x), %rax, %r14
adcxq %rax, %r13
adoxq %rbp, %r14
adcq %rbp, %r14
movq 0x38(y), %rdx
xorl %ebp, %ebp
mulxq (x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %r8
movq %r15, 0x38(%rsp)
mulxq 0x8(x), %rax, %rbx
adcxq %rax, %r8
adoxq %rbx, %r9
mulxq 0x10(x), %rax, %rbx
adcxq %rax, %r9
adoxq %rbx, %r10
mulxq 0x18(x), %rax, %rbx
adcxq %rax, %r10
adoxq %rbx, %r11
mulxq 0x20(x), %rax, %rbx
adcxq %rax, %r11
adoxq %rbx, %r12
mulxq 0x28(x), %rax, %rbx
adcxq %rax, %r12
adoxq %rbx, %r13
mulxq 0x30(x), %rax, %rbx
adcxq %rax, %r13
adoxq %rbx, %r14
mulxq 0x38(x), %rax, %r15
adcxq %rax, %r14
adoxq %rbp, %r15
adcq %rbp, %r15
// Accumulate x[8] * y[0..7], extending the window to %rbp,%r15,...,%r8
movq 64(x), %rdx
xorl %ebp, %ebp
mulpadd(%r9,%r8,(y))
mulpadd(%r10,%r9,8(y))
mulpadd(%r11,%r10,16(y))
mulpadd(%r12,%r11,24(y))
mulpadd(%r13,%r12,32(y))
mulpadd(%r14,%r13,40(y))
mulpadd(%r15,%r14,48(y))
mulxq 56(y), %rax, %rbx
adcxq %rax, %r15
adoxq %rbp, %rbx
adcq %rbx, %rbp
// Accumulate y[8] * x[0..8] within this extended window %rbp,%r15,...,%r8
movq 64(y), %rdx
xorl %eax, %eax
mulpadd(%r9,%r8,(x))
mulpadd(%r10,%r9,8(x))
mulpadd(%r11,%r10,16(x))
mulpadd(%r12,%r11,24(x))
mulpadd(%r13,%r12,32(x))
mulpadd(%r14,%r13,40(x))
mulpadd(%r15,%r14,48(x))
mulxq 56(x), %rax, %rbx
adcxq %rax, %r15
adoxq %rbx, %rbp
mulxq 64(x), %rax, %rbx
adcq %rax, %rbp
// Rotate the upper portion right 9 bits since 2^512 == 2^-9 (mod p_521)
// Let rotated result %rbp,%r15,%r14,...,%r8 be h (high) and %rsp[0..7] be l (low)
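// (Sanity check of the congruence: p_521 = 2^521 - 1 gives 2^521 == 1,
// so 2^512 * 2^9 == 1 and hence 2^512 == 2^-9 (mod p_521). The 9 bits
// saved in %rax below are exactly the ones shifted off the bottom by the
// shrd chain; adding them back into the top word %rbp completes the
// 521-bit rotation.)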
movq %r8, %rax
andq $0x1FF, %rax
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %r11, %r10
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r15, %r14
shrdq $9, %rbp, %r15
shrq $9, %rbp
addq %rax, %rbp
// Force carry-in then add to get s = h + l + 1
// but actually add all 1s in the top 55 bits to get simple carry out
stc
adcq (%rsp), %r8
adcq 8(%rsp), %r9
adcq 16(%rsp), %r10
adcq 24(%rsp), %r11
adcq 32(%rsp), %r12
adcq 40(%rsp), %r13
adcq 48(%rsp), %r14
adcq 56(%rsp), %r15
adcq $~0x1FF, %rbp
// Now CF is set <=> h + l + 1 >= 2^521 <=> h + l >= p_521,
// in which case the lower 521 bits are already right. Otherwise if
// CF is clear, we want to subtract 1. Hence subtract the complement
// of the carry flag then mask the top word, which scrubs the
// padding in either case. Write digits back as they are created.
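// Schematically:  s = h + l + 1;
//   if (s >= 2^521)  z = s - 2^521;   // = (h + l) - p_521
//   else             z = s - 1;       // = h + l, already < p_521
// The 1s-padding of the top word makes "s >= 2^521" appear as a plain
// carry-out (CF), and the sbb/and chain performs the conditional -1 and
// the truncation to 521 bits in one pass.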
cmc
sbbq $0, %r8
movq %r8, (z)
sbbq $0, %r9
movq %r9, 8(z)
sbbq $0, %r10
movq %r10, 16(z)
sbbq $0, %r11
movq %r11, 24(z)
sbbq $0, %r12
movq %r12, 32(z)
sbbq $0, %r13
movq %r13, 40(z)
sbbq $0, %r14
movq %r14, 48(z)
sbbq $0, %r15
movq %r15, 56(z)
sbbq $0, %rbp
andq $0x1FF, %rbp
movq %rbp, 64(z)
// Restore registers and return
CFI_INC_RSP(64)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
CFI_POP(%rbp)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mul_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_montmul_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^576) mod p_521
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_montmul_p521_alt(uint64_t z[static 9],
// const uint64_t x[static 9],
// const uint64_t y[static 9]);
//
// Does z := (x * y / 2^576) mod p_521, assuming x < p_521, y < p_521. This
// means the Montgomery base is the "native size" 2^{9*64} = 2^576; since
// p_521 is a Mersenne prime the basic modular multiplication bignum_mul_p521
// can be considered a Montgomery operation to base 2^521.
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
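// Note on the Montgomery step: after the plain modular product, the only
// extra work is division by 2^576 mod p_521, and since 2^521 == 1 that
// divisor collapses to 2^(576-521) = 2^55, i.e. a 55-bit right rotation
// of the 521-bit result (the shrdq-by-55 chain at the end below).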
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p521_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p521_alt)
.text
#define z %rdi
#define x %rsi
// This is moved from %rdx to free it for muls
#define y %rcx
// Macro for the key "multiply and add to (c,h,l)" step
#define combadd(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A minutely shorter form for when c = 0 initially
#define combadz(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq c, c
// A short form where we don't expect a top carry
#define combads(h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h
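// For example, result term (column) 2 of the product below is
//     x[0]*y[2] + x[1]*y[1] + x[2]*y[0]:
// after c is zeroed, combadz adds the first product and captures its
// carry with "adcq c, c", and each following combadd folds in another
// x[i]*y[j] with i + j = 2, accumulating further carries into c.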
S2N_BN_SYMBOL(bignum_montmul_p521_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Make more registers available and make temporary space on stack
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(72)
// Copy y into a safe register to start with
movq %rdx, y
// Start doing a conventional columnwise multiplication,
// temporarily storing the lower 9 digits to the stack.
// Start with result term 0
movq (x), %rax
mulq (y)
movq %rax, (%rsp)
movq %rdx, %r9
xorq %r10, %r10
// Result term 1
xorq %r11, %r11
combads(%r10,%r9,(x),8(y))
combadz(%r11,%r10,%r9,8(x),(y))
movq %r9, 8(%rsp)
// Result term 2
xorq %r12, %r12
combadz(%r12,%r11,%r10,(x),16(y))
combadd(%r12,%r11,%r10,8(x),8(y))
combadd(%r12,%r11,%r10,16(x),(y))
movq %r10, 16(%rsp)
// Result term 3
xorq %r13, %r13
combadz(%r13,%r12,%r11,(x),24(y))
combadd(%r13,%r12,%r11,8(x),16(y))
combadd(%r13,%r12,%r11,16(x),8(y))
combadd(%r13,%r12,%r11,24(x),(y))
movq %r11, 24(%rsp)
// Result term 4
xorq %r14, %r14
combadz(%r14,%r13,%r12,(x),32(y))
combadd(%r14,%r13,%r12,8(x),24(y))
combadd(%r14,%r13,%r12,16(x),16(y))
combadd(%r14,%r13,%r12,24(x),8(y))
combadd(%r14,%r13,%r12,32(x),(y))
movq %r12, 32(%rsp)
// Result term 5
xorq %r15, %r15
combadz(%r15,%r14,%r13,(x),40(y))
combadd(%r15,%r14,%r13,8(x),32(y))
combadd(%r15,%r14,%r13,16(x),24(y))
combadd(%r15,%r14,%r13,24(x),16(y))
combadd(%r15,%r14,%r13,32(x),8(y))
combadd(%r15,%r14,%r13,40(x),(y))
movq %r13, 40(%rsp)
// Result term 6
xorq %r8, %r8
combadz(%r8,%r15,%r14,(x),48(y))
combadd(%r8,%r15,%r14,8(x),40(y))
combadd(%r8,%r15,%r14,16(x),32(y))
combadd(%r8,%r15,%r14,24(x),24(y))
combadd(%r8,%r15,%r14,32(x),16(y))
combadd(%r8,%r15,%r14,40(x),8(y))
combadd(%r8,%r15,%r14,48(x),(y))
movq %r14, 48(%rsp)
// Result term 7
xorq %r9, %r9
combadz(%r9,%r8,%r15,(x),56(y))
combadd(%r9,%r8,%r15,8(x),48(y))
combadd(%r9,%r8,%r15,16(x),40(y))
combadd(%r9,%r8,%r15,24(x),32(y))
combadd(%r9,%r8,%r15,32(x),24(y))
combadd(%r9,%r8,%r15,40(x),16(y))
combadd(%r9,%r8,%r15,48(x),8(y))
combadd(%r9,%r8,%r15,56(x),(y))
movq %r15, 56(%rsp)
// Result term 8
xorq %r10, %r10
combadz(%r10,%r9,%r8,(x),64(y))
combadd(%r10,%r9,%r8,8(x),56(y))
combadd(%r10,%r9,%r8,16(x),48(y))
combadd(%r10,%r9,%r8,24(x),40(y))
combadd(%r10,%r9,%r8,32(x),32(y))
combadd(%r10,%r9,%r8,40(x),24(y))
combadd(%r10,%r9,%r8,48(x),16(y))
combadd(%r10,%r9,%r8,56(x),8(y))
combadd(%r10,%r9,%r8,64(x),(y))
movq %r8, 64(%rsp)
// At this point we suspend writing back results and collect them
// in a register window. Next is result term 9
xorq %r11, %r11
combadz(%r11,%r10,%r9,8(x),64(y))
combadd(%r11,%r10,%r9,16(x),56(y))
combadd(%r11,%r10,%r9,24(x),48(y))
combadd(%r11,%r10,%r9,32(x),40(y))
combadd(%r11,%r10,%r9,40(x),32(y))
combadd(%r11,%r10,%r9,48(x),24(y))
combadd(%r11,%r10,%r9,56(x),16(y))
combadd(%r11,%r10,%r9,64(x),8(y))
// Result term 10
xorq %r12, %r12
combadz(%r12,%r11,%r10,16(x),64(y))
combadd(%r12,%r11,%r10,24(x),56(y))
combadd(%r12,%r11,%r10,32(x),48(y))
combadd(%r12,%r11,%r10,40(x),40(y))
combadd(%r12,%r11,%r10,48(x),32(y))
combadd(%r12,%r11,%r10,56(x),24(y))
combadd(%r12,%r11,%r10,64(x),16(y))
// Result term 11
xorq %r13, %r13
combadz(%r13,%r12,%r11,24(x),64(y))
combadd(%r13,%r12,%r11,32(x),56(y))
combadd(%r13,%r12,%r11,40(x),48(y))
combadd(%r13,%r12,%r11,48(x),40(y))
combadd(%r13,%r12,%r11,56(x),32(y))
combadd(%r13,%r12,%r11,64(x),24(y))
// Result term 12
xorq %r14, %r14
combadz(%r14,%r13,%r12,32(x),64(y))
combadd(%r14,%r13,%r12,40(x),56(y))
combadd(%r14,%r13,%r12,48(x),48(y))
combadd(%r14,%r13,%r12,56(x),40(y))
combadd(%r14,%r13,%r12,64(x),32(y))
// Result term 13
xorq %r15, %r15
combadz(%r15,%r14,%r13,40(x),64(y))
combadd(%r15,%r14,%r13,48(x),56(y))
combadd(%r15,%r14,%r13,56(x),48(y))
combadd(%r15,%r14,%r13,64(x),40(y))
// Result term 14
xorq %r8, %r8
combadz(%r8,%r15,%r14,48(x),64(y))
combadd(%r8,%r15,%r14,56(x),56(y))
combadd(%r8,%r15,%r14,64(x),48(y))
// Result term 15
combads(%r8,%r15,56(x),64(y))
combads(%r8,%r15,64(x),56(y))
// Result term 16
movq 64(x), %rax
imulq 64(y), %rax
addq %r8, %rax
// Now the upper portion is [%rax;%r15;%r14;%r13;%r12;%r11;%r10;%r9;[%rsp+64]].
// Rotate the upper portion right 9 bits since 2^512 == 2^-9 (mod p_521)
// Let rotated result %rdx,%r15,%r14,...,%r8 be h (high) and %rsp[0..7] be l (low)
movq 64(%rsp), %r8
movq %r8, %rdx
andq $0x1FF, %rdx
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %r11, %r10
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r15, %r14
shrdq $9, %rax, %r15
shrq $9, %rax
addq %rax, %rdx
// Force carry-in then add to get s = h + l + 1
// but actually add all 1s in the top 55 bits to get simple carry out
stc
adcq (%rsp), %r8
adcq 8(%rsp), %r9
adcq 16(%rsp), %r10
adcq 24(%rsp), %r11
adcq 32(%rsp), %r12
adcq 40(%rsp), %r13
adcq 48(%rsp), %r14
adcq 56(%rsp), %r15
adcq $~0x1FF, %rdx
// Now CF is set <=> h + l + 1 >= 2^521 <=> h + l >= p_521,
// in which case the lower 521 bits are already right. Otherwise if
// CF is clear, we want to subtract 1. Hence subtract the complement
// of the carry flag then mask the top word, which scrubs the
// padding in either case.
cmc
sbbq $0, %r8
sbbq $0, %r9
sbbq $0, %r10
sbbq $0, %r11
sbbq $0, %r12
sbbq $0, %r13
sbbq $0, %r14
sbbq $0, %r15
sbbq $0, %rdx
andq $0x1FF, %rdx
// So far, this has been the same as a pure modular multiply.
// Now finally the Montgomery ingredient, which is just a 521-bit
// rotation by 9*64 - 521 = 55 bits right. Write digits back as
// they are created.
movq %r8, %rax
shrdq $55, %r9, %r8
movq %r8, (z)
shrdq $55, %r10, %r9
movq %r9, 8(z)
shrdq $55, %r11, %r10
shlq $9, %rax
movq %r10, 16(z)
shrdq $55, %r12, %r11
movq %r11, 24(z)
shrdq $55, %r13, %r12
movq %r12, 32(z)
orq %rax, %rdx
shrdq $55, %r14, %r13
movq %r13, 40(z)
shrdq $55, %r15, %r14
movq %r14, 48(z)
shrdq $55, %rdx, %r15
movq %r15, 56(z)
shrq $55, %rdx
movq %rdx, 64(z)
// Restore registers and return
CFI_INC_RSP(72)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p521_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/p521_jmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jmixadd(uint64_t p3[static 27], const uint64_t p1[static 27],
// const uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity. It is assumed that
// all the coordinates of the input points p1 and p2 are fully reduced
// mod p_521, that the z coordinate of p1 is nonzero and that neither
// p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents the same affine
// point as".
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jmixadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jmixadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jmixadd)
.text
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
// These are where they arrive except for input_y, initially in %rdx
#define input_z %rdi
#define input_x %rsi
#define input_y %rcx
// Pointer-offset pairs for inputs and outputs
#define x_1 0(input_x)
#define y_1 NUMSIZE(input_x)
#define z_1 (2*NUMSIZE)(input_x)
#define x_2 0(input_y)
#define y_2 NUMSIZE(input_y)
#define x_3 0(input_z)
#define y_3 NUMSIZE(input_z)
#define z_3 (2*NUMSIZE)(input_z)
// Pointer-offset pairs for temporaries, with some aliasing
// The tmp field is internal storage for field mul and sqr.
// NSPACE is the total stack needed for these temporaries
#define zp2 (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define tmp (NUMSIZE*6)(%rsp)
#define NSPACE 496
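// (Layout check: the six 72-byte temporaries above occupy offsets 0..431,
// and tmp holds the eight low product words at 432..495 stashed by
// mul_p521/sqr_p521, giving NSPACE = 496 exactly.)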
// Corresponds exactly to bignum_mul_p521
#define mul_p521(P0,P1,P2) \
xorl %ebp, %ebp ; \
movq P2, %rdx ; \
mulxq P1, %r8, %r9 ; \
movq %r8, 432(%rsp) ; \
mulxq 0x8+P1, %rbx, %r10 ; \
adcq %rbx, %r9 ; \
mulxq 0x10+P1, %rbx, %r11 ; \
adcq %rbx, %r10 ; \
mulxq 0x18+P1, %rbx, %r12 ; \
adcq %rbx, %r11 ; \
mulxq 0x20+P1, %rbx, %r13 ; \
adcq %rbx, %r12 ; \
mulxq 0x28+P1, %rbx, %r14 ; \
adcq %rbx, %r13 ; \
mulxq 0x30+P1, %rbx, %r15 ; \
adcq %rbx, %r14 ; \
mulxq 0x38+P1, %rbx, %r8 ; \
adcq %rbx, %r15 ; \
adcq %rbp, %r8 ; \
movq 0x8+P2, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
movq %r9, 440(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x38+P1, %rax, %r9 ; \
adcxq %rax, %r8 ; \
adoxq %rbp, %r9 ; \
adcq %rbp, %r9 ; \
movq 0x10+P2, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
movq %r10, 448(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x38+P1, %rax, %r10 ; \
adcxq %rax, %r9 ; \
adoxq %rbp, %r10 ; \
adcq %rbp, %r10 ; \
movq 0x18+P2, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq %r11, 456(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x38+P1, %rax, %r11 ; \
adcxq %rax, %r10 ; \
adoxq %rbp, %r11 ; \
adcq %rbp, %r11 ; \
movq 0x20+P2, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq %r12, 464(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x38+P1, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rbp, %r12 ; \
adcq %rbp, %r12 ; \
movq 0x28+P2, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
movq %r13, 472(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x38+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rbp, %r13 ; \
adcq %rbp, %r13 ; \
movq 0x30+P2, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
movq %r14, 480(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x38+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rbp, %r14 ; \
adcq %rbp, %r14 ; \
movq 0x38+P2, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
movq %r15, 488(%rsp) ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x38+P1, %rax, %r15 ; \
adcxq %rax, %r14 ; \
adoxq %rbp, %r15 ; \
adcq %rbp, %r15 ; \
movq 0x40+P1, %rdx ; \
xorl %ebp, %ebp ; \
mulxq P2, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P2, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P2, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P2, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P2, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P2, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P2, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P2, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbp, %rbx ; \
adcq %rbx, %rbp ; \
movq 0x40+P2, %rdx ; \
xorl %eax, %eax ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %rbp ; \
mulxq 0x40+P1, %rax, %rbx ; \
adcq %rax, %rbp ; \
movq %r8, %rax ; \
andq $0x1ff, %rax ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rbp, %r15 ; \
shrq $0x9, %rbp ; \
addq %rax, %rbp ; \
stc; \
adcq 432(%rsp), %r8 ; \
adcq 440(%rsp), %r9 ; \
adcq 448(%rsp), %r10 ; \
adcq 456(%rsp), %r11 ; \
adcq 464(%rsp), %r12 ; \
adcq 472(%rsp), %r13 ; \
adcq 480(%rsp), %r14 ; \
adcq 488(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rbp ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rbp ; \
andq $0x1ff, %rbp ; \
movq %rbp, 0x40+P0
// Corresponds exactly to bignum_sqr_p521
#define sqr_p521(P0,P1) \
xorl %ebp, %ebp ; \
movq P1, %rdx ; \
mulxq 0x8+P1, %r9, %rax ; \
movq %r9, 440(%rsp) ; \
mulxq 0x10+P1, %r10, %rbx ; \
adcxq %rax, %r10 ; \
movq %r10, 448(%rsp) ; \
mulxq 0x18+P1, %r11, %rax ; \
adcxq %rbx, %r11 ; \
mulxq 0x20+P1, %r12, %rbx ; \
adcxq %rax, %r12 ; \
mulxq 0x28+P1, %r13, %rax ; \
adcxq %rbx, %r13 ; \
mulxq 0x30+P1, %r14, %rbx ; \
adcxq %rax, %r14 ; \
mulxq 0x38+P1, %r15, %r8 ; \
adcxq %rbx, %r15 ; \
adcxq %rbp, %r8 ; \
xorl %ebp, %ebp ; \
movq 0x8+P1, %rdx ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
movq %r11, 456(%rsp) ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
movq %r12, 464(%rsp) ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x38+P1, %rax, %r9 ; \
adcxq %rax, %r8 ; \
adoxq %rbp, %r9 ; \
movq 0x20+P1, %rdx ; \
mulxq 0x28+P1, %rax, %r10 ; \
adcxq %rax, %r9 ; \
adoxq %rbp, %r10 ; \
adcxq %rbp, %r10 ; \
xorl %ebp, %ebp ; \
movq 0x10+P1, %rdx ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
movq %r13, 472(%rsp) ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
movq %r14, 480(%rsp) ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
movq 0x30+P1, %rdx ; \
mulxq 0x20+P1, %rax, %r11 ; \
adcxq %rax, %r10 ; \
adoxq %rbp, %r11 ; \
mulxq 0x28+P1, %rax, %r12 ; \
adcxq %rax, %r11 ; \
adoxq %rbp, %r12 ; \
adcxq %rbp, %r12 ; \
xorl %ebp, %ebp ; \
movq 0x18+P1, %rdx ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %r8 ; \
movq %r15, 488(%rsp) ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
movq 0x38+P1, %rdx ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x28+P1, %rax, %r13 ; \
adcxq %rax, %r12 ; \
adoxq %rbp, %r13 ; \
mulxq 0x30+P1, %rax, %r14 ; \
adcxq %rax, %r13 ; \
adoxq %rbp, %r14 ; \
adcxq %rbp, %r14 ; \
xorl %ebp, %ebp ; \
movq P1, %rdx ; \
mulxq %rdx, %rax, %rbx ; \
movq %rax, 432(%rsp) ; \
movq 440(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 440(%rsp) ; \
movq 448(%rsp), %rax ; \
movq 0x8+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %rax, %rax ; \
adoxq %rdx, %rax ; \
movq %rax, 448(%rsp) ; \
movq 456(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 456(%rsp) ; \
movq 464(%rsp), %rax ; \
movq 0x10+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %rax, %rax ; \
adoxq %rdx, %rax ; \
movq %rax, 464(%rsp) ; \
movq 472(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 472(%rsp) ; \
movq 480(%rsp), %rax ; \
movq 0x18+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %rax, %rax ; \
adoxq %rdx, %rax ; \
movq %rax, 480(%rsp) ; \
movq 488(%rsp), %rax ; \
adcxq %rax, %rax ; \
adoxq %rbx, %rax ; \
movq %rax, 488(%rsp) ; \
movq 0x20+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %r8, %r8 ; \
adoxq %rdx, %r8 ; \
adcxq %r9, %r9 ; \
adoxq %rbx, %r9 ; \
movq 0x28+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %r10, %r10 ; \
adoxq %rdx, %r10 ; \
adcxq %r11, %r11 ; \
adoxq %rbx, %r11 ; \
movq 0x30+P1, %rdx ; \
mulxq %rdx, %rdx, %rbx ; \
adcxq %r12, %r12 ; \
adoxq %rdx, %r12 ; \
adcxq %r13, %r13 ; \
adoxq %rbx, %r13 ; \
movq 0x38+P1, %rdx ; \
mulxq %rdx, %rdx, %r15 ; \
adcxq %r14, %r14 ; \
adoxq %rdx, %r14 ; \
adcxq %rbp, %r15 ; \
adoxq %rbp, %r15 ; \
movq 0x40+P1, %rdx ; \
movq %rdx, %rbp ; \
imulq %rbp, %rbp ; \
addq %rdx, %rdx ; \
mulxq P1, %rax, %rbx ; \
adcxq %rax, %r8 ; \
adoxq %rbx, %r9 ; \
mulxq 0x8+P1, %rax, %rbx ; \
adcxq %rax, %r9 ; \
adoxq %rbx, %r10 ; \
mulxq 0x10+P1, %rax, %rbx ; \
adcxq %rax, %r10 ; \
adoxq %rbx, %r11 ; \
mulxq 0x18+P1, %rax, %rbx ; \
adcxq %rax, %r11 ; \
adoxq %rbx, %r12 ; \
mulxq 0x20+P1, %rax, %rbx ; \
adcxq %rax, %r12 ; \
adoxq %rbx, %r13 ; \
mulxq 0x28+P1, %rax, %rbx ; \
adcxq %rax, %r13 ; \
adoxq %rbx, %r14 ; \
mulxq 0x30+P1, %rax, %rbx ; \
adcxq %rax, %r14 ; \
adoxq %rbx, %r15 ; \
mulxq 0x38+P1, %rax, %rbx ; \
adcxq %rax, %r15 ; \
adoxq %rbx, %rbp ; \
adcq $0x0, %rbp ; \
movq %r8, %rax ; \
andq $0x1ff, %rax ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rbp, %r15 ; \
shrq $0x9, %rbp ; \
addq %rax, %rbp ; \
stc; \
adcq 432(%rsp), %r8 ; \
adcq 440(%rsp), %r9 ; \
adcq 448(%rsp), %r10 ; \
adcq 456(%rsp), %r11 ; \
adcq 464(%rsp), %r12 ; \
adcq 472(%rsp), %r13 ; \
adcq 480(%rsp), %r14 ; \
adcq 488(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rbp ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rbp ; \
andq $0x1ff, %rbp ; \
movq %rbp, 0x40+P0
// Corresponds exactly to bignum_sub_p521
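// The borrow fixup relies on 2^521 == 1 (mod p_521): if the raw 9-word
// subtraction borrows (P1 < P2), propagating that borrow as a further -1
// and masking to 521 bits yields P1 - P2 + p_521, since -2^521 == -1.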
#define sub_p521(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
movq 0x30+P1, %r12 ; \
sbbq 0x30+P2, %r12 ; \
movq 0x38+P1, %r13 ; \
sbbq 0x38+P2, %r13 ; \
movq 0x40+P1, %r14 ; \
sbbq 0x40+P2, %r14 ; \
sbbq $0x0, %rax ; \
movq %rax, P0 ; \
sbbq $0x0, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq $0x0, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x30+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x38+P0 ; \
sbbq $0x0, %r14 ; \
andq $0x1ff, %r14 ; \
movq %r14, 0x40+P0
// Additional macros to help with final multiplexing
#define testzero9(P) \
movq P, %rax ; \
movq 8+P, %rbx ; \
movq 16+P, %rdx ; \
movq 24+P, %rbp ; \
orq 32+P, %rax ; \
orq 40+P, %rbx ; \
orq 48+P, %rdx ; \
orq 56+P, %rbp ; \
orq %rbx, %rax ; \
orq %rbp, %rdx ; \
orq 64+P, %rax ; \
orq %rdx, %rax
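// On exit ZF is set iff all nine digits of P are zero; the mux9/mux9c
// macros below consume that flag directly via cmovz, so nothing that
// clobbers flags may come between testzero9 and the multiplexing.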
#define mux9(P0,PNE,PEQ) \
movq PNE, %rax ; \
movq PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, P0 ; \
movq 8+PNE, %rax ; \
movq 8+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 8+P0 ; \
movq 16+PNE, %rax ; \
movq 16+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 16+P0 ; \
movq 24+PNE, %rax ; \
movq 24+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 24+P0 ; \
movq 32+PNE, %rax ; \
movq 32+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 32+P0 ; \
movq 40+PNE, %rax ; \
movq 40+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 40+P0 ; \
movq 48+PNE, %rax ; \
movq 48+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 48+P0 ; \
movq 56+PNE, %rax ; \
movq 56+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 56+P0 ; \
movq 64+PNE, %rax ; \
movq 64+PEQ, %rbx ; \
cmovzq %rbx, %rax ; \
movq %rax, 64+P0
#define mux9c(P0,PNE) \
movq PNE, %rax ; \
movl $1, %ebx ; \
cmovzq %rbx, %rax ; \
movq %rax, P0 ; \
movq 8+PNE, %rax ; \
movl $0, %ebx ; \
cmovzq %rbx, %rax ; \
movq %rax, 8+P0 ; \
movq 16+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 16+P0 ; \
movq 24+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 24+P0 ; \
movq 32+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 32+P0 ; \
movq 40+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 40+P0 ; \
movq 48+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 48+P0 ; \
movq 56+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 56+P0 ; \
movq 64+PNE, %rax ; \
cmovzq %rbx, %rax ; \
movq %rax, 64+P0
#define copy9(P0,P1) \
movq P1, %rax ; \
movq %rax, P0 ; \
movq 8+P1, %rax ; \
movq %rax, 8+P0 ; \
movq 16+P1, %rax ; \
movq %rax, 16+P0 ; \
movq 24+P1, %rax ; \
movq %rax, 24+P0 ; \
movq 32+P1, %rax ; \
movq %rax, 32+P0 ; \
movq 40+P1, %rax ; \
movq %rax, 40+P0 ; \
movq 48+P1, %rax ; \
movq %rax, 48+P0 ; \
movq 56+P1, %rax ; \
movq %rax, 56+P0 ; \
movq 64+P1, %rax ; \
movq %rax, 64+P0
S2N_BN_SYMBOL(p521_jmixadd):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Move the input arguments to stable places (two are already there)
movq %rdx, input_y
// Main code, just a sequence of basic field operations
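// Writing (x1,y1,z1) for p1 and (x2,y2) for p2, this computes the usual
// Jacobian mixed-addition quantities:
//   zp2 = z1^2, x2a = x2 * z1^2, y2a = y2 * z1^3,
//   xd = x2a - x1, yd = y2a - y1, zz = xd^2, ww = yd^2,
//   resx = ww - zz*x1 - zz*x2a,
//   resy = yd * (zz*x1 - resx) - y1 * (zz*x2a - zz*x1),
//   resz = xd * z1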
sqr_p521(zp2,z_1)
mul_p521(y2a,z_1,y_2)
mul_p521(x2a,zp2,x_2)
mul_p521(y2a,zp2,y2a)
sub_p521(xd,x2a,x_1)
sub_p521(yd,y2a,y_1)
sqr_p521(zz,xd)
sqr_p521(ww,yd)
mul_p521(zzx1,zz,x_1)
mul_p521(zzx2,zz,x2a)
sub_p521(resx,ww,zzx1)
sub_p521(t1,zzx2,zzx1)
mul_p521(resz,xd,z_1)
sub_p521(resx,resx,zzx2)
sub_p521(t2,zzx1,resx)
mul_p521(t1,t1,y_1)
mul_p521(t2,yd,t2)
sub_p521(resy,t2,t1)
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
testzero9(z_1)
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with an extra z = 1
// coordinate, hence giving 0 + p2 = p2 for the final result.
mux9 (resx,resx,x_2)
mux9 (resy,resy,y_2)
copy9(x_3,resx)
copy9(y_3,resy)
mux9c(z_3,resz)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p521_jmixadd)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_triple_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Triple modulo p_521, z := (3 * x) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_triple_p521(uint64_t z[static 9],
// const uint64_t x[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p521)
.text
#define z %rdi
#define x %rsi
// d7 re-uses the input pointer when safe to do so
#define d0 %rax
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11
#define d6 %r12
#define d7 %rsi
#define d8 %rdx
#define m %rbx
#define mshort %ebx
S2N_BN_SYMBOL(bignum_triple_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
// Load the top (short) word first to compute the initial carry-in
// Set OF according to bit 520, but *always* set CF to get a +1 bump
movq 64(x), m
movq m, d8
shlq $54, m
addq m, m
stc
// Use a double carry chain to compute x' + x + 1 where x' is a
// 1-bit left rotation of x; this is then == 3 * x + 1 (mod p_521)
// This gives us s = [d8;d7;d6;d5;d4;d3;d2;d1;d0] = x + x' + 1.
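// (Why the rotation doubles: over 521 bits, rotl1(x) = 2*x - p_521*b where
// b is bit 520 of x, so x' == 2*x (mod p_521) and x + x' + 1 == 3*x + 1.)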
movq (x), m
movq m, d0
adcxq m, m
adoxq m, d0
movq 8(x), m
movq m, d1
adcxq m, m
adoxq m, d1
movq 16(x), m
movq m, d2
adcxq m, m
adoxq m, d2
movq 24(x), m
movq m, d3
adcxq m, m
adoxq m, d3
movq 32(x), m
movq m, d4
adcxq m, m
adoxq m, d4
movq 40(x), m
movq m, d5
adcxq m, m
adoxq m, d5
movq 48(x), m
movq m, d6
adcxq m, m
adoxq m, d6
movq 56(x), m
movq m, d7
adcxq m, m
adoxq m, d7
// The last word is slightly more intricate: we naturally end up adding
// 2 * top bit when we shouldn't (because it's a rotation and we've already
// added it at the LSB position) but then compensate by subtracting it.
movq d8, m
adcxq m, m
adoxq m, d8
andq $0x200, m
subq m, d8
// Now x + x' >= p_521 <=> s = x + x' + 1 >= 2^521
// Make m = 512 * [x + x' >= p_521]
movl $512, mshort
andq d8, m
// Now if x + x' >= p_521, we want (x + x') - p_521 = s - 2^521
// while otherwise we want x + x' = s - 1
// We use the mask m both as an operand and to generate the dual carry
// Write back the results as generated
cmpq $512, m
sbbq $0, d0
movq d0, (z)
sbbq $0, d1
movq d1, 8(z)
sbbq $0, d2
movq d2, 16(z)
sbbq $0, d3
movq d3, 24(z)
sbbq $0, d4
movq d4, 32(z)
sbbq $0, d5
movq d5, 40(z)
sbbq $0, d6
movq d6, 48(z)
sbbq $0, d7
movq d7, 56(z)
sbbq m, d8
movq d8, 64(z)
// Restore registers and return
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_triple_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_montsqr_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^576) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_montsqr_p521(uint64_t z[static 9],
// const uint64_t x[static 9]);
//
// Does z := (x^2 / 2^576) mod p_521, assuming x < p_521. This means the
// Montgomery base is the "native size" 2^{9*64} = 2^576; since p_521 is
// a Mersenne prime the basic modular squaring bignum_sqr_p521 can be
// considered a Montgomery operation to base 2^521.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p521)
.text
#define z %rdi
#define x %rsi
// A zero register
#define zero %rbp
#define zeroe %ebp
// mulpadd(high,low,i) adds %rdx * x[i] to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax and %rcx as temporaries.
#define mulpadd(high,low,I) \
mulxq I(x), %rax, %rcx ; \
adcxq %rax, low ; \
adoxq %rcx, high
// mulpade(high,low,i) adds %rdx * x[i] to a register-pair (high,low)
// maintaining consistent double-carrying with adcx and adox,
// using %rax as a temporary, assuming high created from scratch
// and that zero has value zero.
#define mulpade(high,low,I) \
mulxq I(x), %rax, high ; \
adcxq %rax, low ; \
adoxq zero, high
S2N_BN_SYMBOL(bignum_montsqr_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with and make temporary space on stack
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(64)
// Do a basic 8x8 squaring stashing %rsp[0..7] but keeping the
// top half in the usual rotating register window %r15,...,%r8. Except
// for the lack of full writeback this is the same as bignum_sqr_8_16.
xorl zeroe, zeroe
movq (x), %rdx
mulxq 8(x), %r9, %rax
movq %r9, 8(%rsp)
mulxq 16(x), %r10, %rcx
adcxq %rax, %r10
movq %r10, 16(%rsp)
mulxq 24(x), %r11, %rax
adcxq %rcx, %r11
mulxq 32(x), %r12, %rcx
adcxq %rax, %r12
mulxq 40(x), %r13, %rax
adcxq %rcx, %r13
mulxq 48(x), %r14, %rcx
adcxq %rax, %r14
mulxq 56(x), %r15, %r8
adcxq %rcx, %r15
adcxq zero, %r8
xorl zeroe, zeroe
movq 8(x), %rdx
mulpadd(%r12,%r11,16)
movq %r11, 24(%rsp)
mulpadd(%r13,%r12,24)
movq %r12, 32(%rsp)
mulpadd(%r14,%r13,32)
mulpadd(%r15,%r14,40)
mulpadd(%r8,%r15,48)
mulpade(%r9,%r8,56)
movq 32(x), %rdx
mulpade(%r10,%r9,40)
adcxq zero, %r10
xorl zeroe, zeroe
movq 16(x), %rdx
mulpadd(%r14,%r13,24)
movq %r13, 40(%rsp)
mulpadd(%r15,%r14,32)
movq %r14, 48(%rsp)
mulpadd(%r8,%r15,40)
mulpadd(%r9,%r8,48)
mulpadd(%r10,%r9,56)
movq 48(x), %rdx
mulpade(%r11,%r10,32)
mulpade(%r12,%r11,40)
adcxq zero, %r12
xorl zeroe, zeroe
movq 24(x), %rdx
mulpadd(%r8,%r15,32)
movq %r15, 56(%rsp)
mulpadd(%r9,%r8,40)
mulpadd(%r10,%r9,48)
mulpadd(%r11,%r10,56)
movq 56(x), %rdx
mulpadd(%r12,%r11,32)
mulpade(%r13,%r12,40)
mulpade(%r14,%r13,48)
adcxq zero, %r14
xorl zeroe, zeroe
movq (x), %rdx
mulxq %rdx, %rax, %rcx
movq %rax, (%rsp)
movq 8(%rsp), %rax
adcxq %rax, %rax
adoxq %rcx, %rax
movq %rax, 8(%rsp)
movq 16(%rsp), %rax
movq 8(x), %rdx
mulxq %rdx, %rdx, %rcx
adcxq %rax, %rax
adoxq %rdx, %rax
movq %rax, 16(%rsp)
movq 24(%rsp), %rax
adcxq %rax, %rax
adoxq %rcx, %rax
movq %rax, 24(%rsp)
movq 32(%rsp), %rax
movq 16(x), %rdx
mulxq %rdx, %rdx, %rcx
adcxq %rax, %rax
adoxq %rdx, %rax
movq %rax, 32(%rsp)
movq 40(%rsp), %rax
adcxq %rax, %rax
adoxq %rcx, %rax
movq %rax, 40(%rsp)
movq 48(%rsp), %rax
movq 24(x), %rdx
mulxq %rdx, %rdx, %rcx
adcxq %rax, %rax
adoxq %rdx, %rax
movq %rax, 48(%rsp)
movq 56(%rsp), %rax
adcxq %rax, %rax
adoxq %rcx, %rax
movq %rax, 56(%rsp)
movq 32(x), %rdx
mulxq %rdx, %rdx, %rcx
adcxq %r8, %r8
adoxq %rdx, %r8
adcxq %r9, %r9
adoxq %rcx, %r9
movq 40(x), %rdx
mulxq %rdx, %rdx, %rcx
adcxq %r10, %r10
adoxq %rdx, %r10
adcxq %r11, %r11
adoxq %rcx, %r11
movq 48(x), %rdx
mulxq %rdx, %rdx, %rcx
adcxq %r12, %r12
adoxq %rdx, %r12
adcxq %r13, %r13
adoxq %rcx, %r13
movq 56(x), %rdx
mulxq %rdx, %rdx, %r15
adcxq %r14, %r14
adoxq %rdx, %r14
adcxq zero, %r15
adoxq zero, %r15
// Augment the high part with the contribution from the top little word C.
// If we write the input as 2^512 * C + x then we are otherwise just doing
// x^2, so we need to add to the high part 2^512 * C^2 + (2 * C) * x.
// The initial doubling add of C also clears the CF and OF flags as desired.
// We extend the window now to the 9-element %rbp,%r15,%r14,...,%r8.
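// (Expanding: (2^512*C + x)^2 = 2^1024*C^2 + 2^513*C*x + x^2, so dividing
// the C-terms by 2^512 gives the quoted high-part addend 2^512*C^2 + 2*C*x;
// the imulq forms C^2 and the addq %rdx, %rdx forms the multiplier 2*C.)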
movq 64(x), %rdx
movq %rdx, %rbp
imulq %rbp, %rbp
addq %rdx, %rdx
mulpadd(%r9,%r8,0)
mulpadd(%r10,%r9,8)
mulpadd(%r11,%r10,16)
mulpadd(%r12,%r11,24)
mulpadd(%r13,%r12,32)
mulpadd(%r14,%r13,40)
mulpadd(%r15,%r14,48)
mulxq 56(x), %rax, %rcx
adcxq %rax, %r15
adoxq %rcx, %rbp
adcq $0, %rbp
// Rotate the upper portion right 9 bits since 2^512 == 2^-9 (mod p_521)
// Let rotated result %rbp,%r15,%r14,...,%r8 be h (high) and %rsp[0..7] be l (low)
movq %r8, %rax
andq $0x1FF, %rax
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %r11, %r10
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r15, %r14
shrdq $9, %rbp, %r15
shrq $9, %rbp
addq %rax, %rbp
// Force carry-in then add to get s = h + l + 1
// but actually add all 1s in the top 55 bits to get simple carry out
stc
adcq (%rsp), %r8
adcq 8(%rsp), %r9
adcq 16(%rsp), %r10
adcq 24(%rsp), %r11
adcq 32(%rsp), %r12
adcq 40(%rsp), %r13
adcq 48(%rsp), %r14
adcq 56(%rsp), %r15
adcq $~0x1FF, %rbp
// Now CF is set <=> h + l + 1 >= 2^521 <=> h + l >= p_521,
// in which case the lower 521 bits are already right. Otherwise if
// CF is clear, we want to subtract 1. Hence subtract the complement
// of the carry flag then mask the top word, which scrubs the
// padding in either case.
cmc
sbbq $0, %r8
sbbq $0, %r9
sbbq $0, %r10
sbbq $0, %r11
sbbq $0, %r12
sbbq $0, %r13
sbbq $0, %r14
sbbq $0, %r15
sbbq $0, %rbp
andq $0x1FF, %rbp
// So far, this has been the same as a pure modular squaring.
// Now finally the Montgomery ingredient, which is just a 521-bit
// rotation by 9*64 - 521 = 55 bits right. Write digits back as
// they are created.
movq %r8, %rax
shrdq $55, %r9, %r8
movq %r8, (z)
shrdq $55, %r10, %r9
movq %r9, 8(z)
shrdq $55, %r11, %r10
shlq $9, %rax
movq %r10, 16(z)
shrdq $55, %r12, %r11
movq %r11, 24(z)
shrdq $55, %r13, %r12
movq %r12, 32(z)
orq %rax, %rbp
shrdq $55, %r14, %r13
movq %r13, 40(z)
shrdq $55, %r15, %r14
movq %r14, 48(z)
shrdq $55, %rbp, %r15
movq %r15, 56(z)
shrq $55, %rbp
movq %rbp, 64(z)
// Restore registers and return
CFI_INC_RSP(64)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_cmul_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_521, z := (c * x) mod p_521, assuming
// x reduced
// Inputs c, x[9]; output z[9]
//
// extern void bignum_cmul_p521_alt(uint64_t z[static 9], uint64_t c,
// const uint64_t x[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI: RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p521_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p521_alt)
.text
#define z %rdi
// Temporarily moved here for initial multiply
#define x %rcx
// Likewise this is thrown away after initial multiply
#define m %rsi
#define c %rdx
#define cshort %edx
#define a %rax
#define d %rdx
#define dd %rax
// Digits: last ones aliased to inputs that are no longer used then
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
#define d4 %rbx
#define d5 %rbp
#define d6 %r12
#define d7 %r13
#define d8 %rcx
#define d9 %rsi
// Same as d9
#define h d9
S2N_BN_SYMBOL(bignum_cmul_p521_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save additional registers to use
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
// Shuffle inputs (since we want %rdx for the high parts of products)
movq %rdx, x
// Multiply as [d9; ...; d0] = c * x.
movq (x), a
mulq m
movq a, d0
movq d, d1
movq 8(x), a
mulq m
xorq d2, d2
addq a, d1
adcq d, d2
movq 16(x), a
mulq m
xorq d3, d3
addq a, d2
adcq d, d3
movq 24(x), a
mulq m
xorq d4, d4
addq a, d3
adcq d, d4
movq 32(x), a
mulq m
xorq d5, d5
addq a, d4
adcq d, d5
movq 40(x), a
mulq m
xorq d6, d6
addq a, d5
adcq d, d6
movq 48(x), a
mulq m
xorq d7, d7
addq a, d6
adcq d, d7
movq 56(x), a
mulq m
addq a, d7
movq 64(x), a
movq $0, d8
adcq d, d8
mulq m
xorq d9, d9
addq a, d8
adcq d, d9
// Create an AND "dd" of digits d7,...,d1, a computation we hope will
// get nicely interleaved with the multiplication chain above, though
// we can't do so directly as we are using the same register %rax.
movq d1, dd
andq d2, dd
andq d3, dd
andq d4, dd
andq d5, dd
andq d6, dd
andq d7, dd
// Extract the high part h==d9 and mask off the low part l = [d8;d7;...;d0]
// but stuff d8 with 1 bits at the left to ease a comparison below
shldq $55, d8, h
orq $~0x1FF, d8
// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits d7,...d1 are all 1s, we use the AND of them "dd" to
// condense the carry chain, and since we stuffed 1 bits into d8 we get
// the result in CF without an additional comparison. Hereafter we use c = 0.
// Since x was assumed reduced, h cannot be maximal, so the "lea" is safe,
// i.e. does not carry or wrap round.
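// Concretely, h + l + 1 >= 2^521 iff the +1-bumped low-digit carry would
// propagate all the way out of bit 520; the middle digits can only pass a
// carry along if each is all 1s, i.e. iff dd = 2^64 - 1, so a single adc
// on dd replays digits d7..d1, and the 1-stuffed d8 turns "carry out of
// bit 8 of d8" into an ordinary carry out of bit 63.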
leaq 1(h), c
addq d0, c
movl $0, cshort
adcq c, dd
movq d8, a
adcq c, a
// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// This masking also gets rid of the stuffing with 1s we did above.
// Write back the digits as they are generated.
adcq h, d0
movq d0, (z)
adcq c, d1
movq d1, 8(z)
adcq c, d2
movq d2, 16(z)
adcq c, d3
movq d3, 24(z)
adcq c, d4
movq d4, 32(z)
adcq c, d5
movq d5, 40(z)
adcq c, d6
movq d6, 48(z)
adcq c, d7
movq d7, 56(z)
adcq c, d8
andq $0x1FF, d8
movq d8, 64(z)
// Restore registers and return
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p521_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/p521_jdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jdouble_alt(uint64_t p3[static 27],
// const uint64_t p1[static 27]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input point are fully
// reduced mod p_521 and that the z coordinate is not zero.
//
// Standard x86-64 ABI: RDI = p3, RSI = p1
// Microsoft x64 ABI: RCX = p3, RDX = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jdouble_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jdouble_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jdouble_alt)
.text
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
// This is actually where they come in anyway and they stay there.
#define input_z %rdi
#define input_x %rsi
// Pointer-offset pairs for inputs and outputs
#define x_1 0(input_x)
#define y_1 NUMSIZE(input_x)
#define z_1 (2*NUMSIZE)(input_x)
#define x_3 0(input_z)
#define y_3 NUMSIZE(input_z)
#define z_3 (2*NUMSIZE)(input_z)
// Pointer-offset pairs for temporaries, with some aliasing
// The tmp field is internal storage for field mul and sqr.
// NSPACE is the total stack needed for these temporaries
#define z2 (NUMSIZE*0)(%rsp)
#define y2 (NUMSIZE*1)(%rsp)
#define x2p (NUMSIZE*2)(%rsp)
#define xy2 (NUMSIZE*3)(%rsp)
#define y4 (NUMSIZE*4)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define dx2 (NUMSIZE*5)(%rsp)
#define t1 (NUMSIZE*5)(%rsp)
#define d (NUMSIZE*6)(%rsp)
#define x4p (NUMSIZE*6)(%rsp)
#define tmp (NUMSIZE*7)(%rsp)
#define NSPACE 576
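// (Layout check: the seven 72-byte temporaries above occupy offsets 0..503,
// and tmp's nine product words at 504..575 fill the last 72 bytes, giving
// NSPACE = 8 * 72 = 576 exactly.)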
// Corresponds to bignum_mul_p521_alt except temp storage location
#define mul_p521(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, 504(%rsp) ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq %r9, 512(%rsp) ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq %r10, 520(%rsp) ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq %r11, 528(%rsp) ; \
xorq %r14, %r14 ; \
movq P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x20+P1, %rax ; \
mulq P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq %r12, 536(%rsp) ; \
xorq %r15, %r15 ; \
movq P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x8+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x20+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x28+P1, %rax ; \
mulq P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq %r13, 544(%rsp) ; \
xorq %r8, %r8 ; \
movq P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x8+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x20+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x28+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x30+P1, %rax ; \
mulq P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq %r14, 552(%rsp) ; \
xorq %r9, %r9 ; \
movq P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq %r9, %r9 ; \
movq 0x8+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x10+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x20+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x28+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x30+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x38+P1, %rax ; \
mulq P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq %r15, 560(%rsp) ; \
xorq %r10, %r10 ; \
movq P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %r10, %r10 ; \
movq 0x8+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x10+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x28+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x30+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x38+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x40+P1, %rax ; \
mulq P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq %r8, 568(%rsp) ; \
xorq %r11, %r11 ; \
movq 0x8+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq 0x10+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x18+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x30+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x38+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x40+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x10+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x18+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x20+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x28+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x30+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x38+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x40+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq 0x18+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x20+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x28+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x30+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x38+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x40+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x20+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x28+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x30+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x38+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x40+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x28+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x30+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x38+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x40+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
xorq %r8, %r8 ; \
movq 0x30+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
imulq 0x40+P2, %rax ; \
addq %r8, %rax ; \
movq 568(%rsp), %r8 ; \
movq %r8, %rdx ; \
andq $0x1ff, %rdx ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rax, %r15 ; \
shrq $0x9, %rax ; \
addq %rax, %rdx ; \
stc; \
adcq 504(%rsp), %r8 ; \
adcq 512(%rsp), %r9 ; \
adcq 520(%rsp), %r10 ; \
adcq 528(%rsp), %r11 ; \
adcq 536(%rsp), %r12 ; \
adcq 544(%rsp), %r13 ; \
adcq 552(%rsp), %r14 ; \
adcq 560(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rdx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rdx ; \
andq $0x1ff, %rdx ; \
movq %rdx, 0x40+P0
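// The tail of the macro above is the standard p_521 folding: writing
// the 17-digit product as 2^512 * h + l, one uses 2^512 == 2^-9
// (mod p_521) to rotate h right by 9 bits before adding it to l, then
// applies the "force carry-in, check bit 521, subtract back" trick.
// A minimal sketch of that final step (informal pseudocode, with
// p = 2^521 - 1 and h, l < 2^521):
//
//      s = h + l + 1;                     // "stc" plus adcq chain
//      r = (s >> 521) ? (s & p)           // carry: h + l >= p
//                     : ((s - 1) & p);    // no carry: already reduced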
// Corresponds to bignum_sqr_p521_alt except temp storage location
#define sqr_p521(P0,P1) \
movq P1, %rax ; \
mulq %rax; \
movq %rax, 504(%rsp) ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r11 ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq %r9, 512(%rsp) ; \
xorq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r12 ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq %r10, 520(%rsp) ; \
movq P1, %rax ; \
mulq 0x18+P1; \
xorq %r13, %r13 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r13 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r13, %r13 ; \
addq %rbx, %r11 ; \
adcq %rcx, %r12 ; \
adcq $0x0, %r13 ; \
movq %r11, 528(%rsp) ; \
movq P1, %rax ; \
mulq 0x20+P1; \
xorq %r14, %r14 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r14 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r14, %r14 ; \
addq %rbx, %r12 ; \
adcq %rcx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq %r12, 536(%rsp) ; \
movq P1, %rax ; \
mulq 0x28+P1; \
xorq %r15, %r15 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r15 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r15, %r15 ; \
addq %rbx, %r13 ; \
adcq %rcx, %r14 ; \
adcq $0x0, %r15 ; \
movq %r13, 544(%rsp) ; \
movq P1, %rax ; \
mulq 0x30+P1; \
xorq %r8, %r8 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r8 ; \
movq 0x10+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r8 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r8, %r8 ; \
addq %rbx, %r14 ; \
adcq %rcx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq %r14, 552(%rsp) ; \
movq P1, %rax ; \
mulq 0x38+P1; \
xorq %r9, %r9 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r9 ; \
movq 0x10+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r9 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r9 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r9, %r9 ; \
addq %rbx, %r15 ; \
adcq %rcx, %r8 ; \
adcq $0x0, %r9 ; \
movq %r15, 560(%rsp) ; \
movq P1, %rax ; \
mulq 0x40+P1; \
xorq %r10, %r10 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x8+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r10 ; \
movq 0x10+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r10 ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r10 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r10, %r10 ; \
addq %rbx, %r8 ; \
adcq %rcx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P1, %rax ; \
mulq %rax; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq %r8, 568(%rsp) ; \
movq 0x8+P1, %rax ; \
mulq 0x40+P1; \
xorq %r11, %r11 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x10+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r11 ; \
movq 0x18+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r11 ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r11 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r11, %r11 ; \
addq %rbx, %r9 ; \
adcq %rcx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x10+P1, %rax ; \
mulq 0x40+P1; \
xorq %r12, %r12 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x18+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r12 ; \
movq 0x20+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r12 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r12, %r12 ; \
addq %rbx, %r10 ; \
adcq %rcx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x28+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x18+P1, %rax ; \
mulq 0x40+P1; \
xorq %r13, %r13 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x20+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r13 ; \
movq 0x28+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r13 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r13, %r13 ; \
addq %rbx, %r11 ; \
adcq %rcx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x20+P1, %rax ; \
mulq 0x40+P1; \
xorq %r14, %r14 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x28+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r14 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r14, %r14 ; \
addq %rbx, %r12 ; \
adcq %rcx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x30+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x28+P1, %rax ; \
mulq 0x40+P1; \
xorq %r15, %r15 ; \
movq %rax, %rbx ; \
movq %rdx, %rcx ; \
movq 0x30+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rcx ; \
adcq $0x0, %r15 ; \
addq %rbx, %rbx ; \
adcq %rcx, %rcx ; \
adcq %r15, %r15 ; \
addq %rbx, %r13 ; \
adcq %rcx, %r14 ; \
adcq $0x0, %r15 ; \
xorq %r8, %r8 ; \
movq 0x38+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x30+P1, %rax ; \
mulq 0x40+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r8 ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x40+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
imulq %rax, %rax ; \
addq %r8, %rax ; \
movq 568(%rsp), %r8 ; \
movq %r8, %rdx ; \
andq $0x1ff, %rdx ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rax, %r15 ; \
shrq $0x9, %rax ; \
addq %rax, %rdx ; \
stc; \
adcq 504(%rsp), %r8 ; \
adcq 512(%rsp), %r9 ; \
adcq 520(%rsp), %r10 ; \
adcq 528(%rsp), %r11 ; \
adcq 536(%rsp), %r12 ; \
adcq 544(%rsp), %r13 ; \
adcq 552(%rsp), %r14 ; \
adcq 560(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rdx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rdx ; \
andq $0x1ff, %rdx ; \
        movq    %rdx, 0x40+P0
// Corresponds exactly to bignum_add_p521
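// (The stc forces s = x + y + 1; bit 9 of the top word, isolated as
// 0x200 in %rdx, is set exactly when s >= 2^521, i.e. when
// x + y >= p_521. The cmpq turns the clear case into a borrow so the
// sbbq chain removes the extra 1 again, while the final
// "sbbq %rdx, %r14" clears the 2^521 bit in the wrapped case.)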
#define add_p521(P0,P1,P2) \
stc; \
movq P1, %rax ; \
adcq P2, %rax ; \
movq 0x8+P1, %rbx ; \
adcq 0x8+P2, %rbx ; \
movq 0x10+P1, %r8 ; \
adcq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
adcq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
adcq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
adcq 0x28+P2, %r11 ; \
movq 0x30+P1, %r12 ; \
adcq 0x30+P2, %r12 ; \
movq 0x38+P1, %r13 ; \
adcq 0x38+P2, %r13 ; \
movq 0x40+P1, %r14 ; \
adcq 0x40+P2, %r14 ; \
movq $0x200, %rdx ; \
andq %r14, %rdx ; \
cmpq $0x200, %rdx ; \
sbbq $0x0, %rax ; \
movq %rax, P0 ; \
sbbq $0x0, %rbx ; \
movq %rbx, 0x8+P0 ; \
sbbq $0x0, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x30+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x38+P0 ; \
sbbq %rdx, %r14 ; \
movq %r14, 0x40+P0
// Corresponds exactly to bignum_sub_p521
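// (If the initial subtraction borrows, the trailing sbbq chain
// subtracts an extra 1 and the final 521-bit mask then realizes
// x - y + p_521, since p_521 = 2^521 - 1.)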
#define sub_p521(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
movq 0x30+P1, %r12 ; \
sbbq 0x30+P2, %r12 ; \
movq 0x38+P1, %r13 ; \
sbbq 0x38+P2, %r13 ; \
movq 0x40+P1, %r14 ; \
sbbq 0x40+P2, %r14 ; \
sbbq $0x0, %rax ; \
movq %rax, P0 ; \
sbbq $0x0, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq $0x0, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x30+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x38+P0 ; \
sbbq $0x0, %r14 ; \
andq $0x1ff, %r14 ; \
movq %r14, 0x40+P0
// Weak multiplication not fully reducing
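// "Weak" here: the tail folds the rotated high part straight back in,
// skipping the final carry trick and top-word masking, so the output
// is congruent to the product mod p_521 and fits in 9 digits but may
// exceed p_521. As used below, such values appear only as the C * P1
// operand of the cmsub macros, which tolerate unreduced inputs.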
#define weakmul_p521(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, 504(%rsp) ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq %r9, 512(%rsp) ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq %r10, 520(%rsp) ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq %r11, 528(%rsp) ; \
xorq %r14, %r14 ; \
movq P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x20+P1, %rax ; \
mulq P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq %r12, 536(%rsp) ; \
xorq %r15, %r15 ; \
movq P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x8+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x20+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x28+P1, %rax ; \
mulq P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq %r13, 544(%rsp) ; \
xorq %r8, %r8 ; \
movq P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x8+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x20+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x28+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x30+P1, %rax ; \
mulq P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq %r14, 552(%rsp) ; \
xorq %r9, %r9 ; \
movq P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq %r9, %r9 ; \
movq 0x8+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x10+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x20+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x28+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x30+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x38+P1, %rax ; \
mulq P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq %r15, 560(%rsp) ; \
xorq %r10, %r10 ; \
movq P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %r10, %r10 ; \
movq 0x8+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x10+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x28+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x30+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x38+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x40+P1, %rax ; \
mulq P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq %r8, 568(%rsp) ; \
xorq %r11, %r11 ; \
movq 0x8+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq 0x10+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x18+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x30+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x38+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x40+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x10+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x18+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x20+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x28+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x30+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x38+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x40+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq 0x18+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x20+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x28+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x30+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x38+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x40+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x20+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x28+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x30+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x38+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x40+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x28+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x30+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x38+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x40+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
xorq %r8, %r8 ; \
movq 0x30+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
imulq 0x40+P2, %rax ; \
addq %r8, %rax ; \
movq 568(%rsp), %r8 ; \
movq %r8, %rdx ; \
andq $0x1ff, %rdx ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rax, %r15 ; \
shrq $0x9, %rax ; \
addq %rax, %rdx ; \
addq 504(%rsp), %r8 ; \
adcq 512(%rsp), %r9 ; \
adcq 520(%rsp), %r10 ; \
adcq 528(%rsp), %r11 ; \
adcq 536(%rsp), %r12 ; \
adcq 544(%rsp), %r13 ; \
adcq 552(%rsp), %r14 ; \
adcq 560(%rsp), %r15 ; \
adcq $0, %rdx ; \
movq %r8, P0 ; \
movq %r9, 0x8+P0 ; \
movq %r10, 0x10+P0 ; \
movq %r11, 0x18+P0 ; \
movq %r12, 0x20+P0 ; \
movq %r13, 0x28+P0 ; \
movq %r14, 0x30+P0 ; \
movq %r15, 0x38+P0 ; \
movq %rdx, 0x40+P0
// P0 = C * P1 - D * P2 == C * P1 + D * (p_521 - P2)
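// Since p_521 = 2^521 - 1 is all 1s across its 521 bits, for any
// P2 < 2^521 the value p_521 - P2 is just the bitwise complement of
// P2 over those bits: notq on each of the low eight words and an XOR
// with 0x1FF on the top one, which is the pattern used below.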
#define cmsub_p521(P0,C,P1,D,P2) \
movq $D, %rcx ; \
movq P2, %rax ; \
notq %rax; \
mulq %rcx; \
movq %rax, %r8 ; \
movq %rdx, %r9 ; \
movq 8+P2, %rax ; \
notq %rax; \
mulq %rcx; \
xorl %r10d, %r10d ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 16+P2, %rax ; \
notq %rax; \
mulq %rcx; \
xorl %r11d, %r11d ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
movq 24+P2, %rax ; \
notq %rax; \
mulq %rcx; \
xorl %r12d, %r12d ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
movq 32+P2, %rax ; \
notq %rax; \
mulq %rcx; \
xorl %r13d, %r13d ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
movq 40+P2, %rax ; \
notq %rax; \
mulq %rcx; \
xorl %r14d, %r14d ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
movq 48+P2, %rax ; \
notq %rax; \
mulq %rcx; \
xorl %r15d, %r15d ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
movq 56+P2, %rax ; \
notq %rax; \
mulq %rcx; \
xorl %ebx, %ebx ; \
addq %rax, %r15 ; \
adcq %rdx, %rbx ; \
movq 64+P2, %rax ; \
xorq $0x1FF, %rax ; \
imulq %rcx, %rax ; \
addq %rax, %rbx ; \
xorl %eax, %eax ; \
movl $C, %ecx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbp, %rbp ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbp, %rbp ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbp, %rbp ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %rbp, %rbp ; \
movq 0x30+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %rbp, %rbp ; \
movq 0x38+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %rbx ; \
movq 0x40+P1, %rax ; \
imulq %rcx, %rax ; \
addq %rax, %rbx ; \
movq %r9, %rax ; \
andq %r10, %rax ; \
andq %r11, %rax ; \
andq %r12, %rax ; \
andq %r13, %rax ; \
andq %r14, %rax ; \
andq %r15, %rax ; \
movq %rbx, %rdx ; \
shrq $9, %rdx ; \
orq $~0x1FF, %rbx ; \
leaq 1(%rdx), %rcx ; \
addq %r8, %rcx ; \
movl $0, %ecx ; \
adcq %rcx, %rax ; \
movq %rbx, %rax ; \
adcq %rcx, %rax ; \
adcq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rcx, %r9 ; \
movq %r9, 8+P0 ; \
adcq %rcx, %r10 ; \
movq %r10, 16+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 24+P0 ; \
adcq %rcx, %r12 ; \
movq %r12, 32+P0 ; \
adcq %rcx, %r13 ; \
movq %r13, 40+P0 ; \
adcq %rcx, %r14 ; \
movq %r14, 48+P0 ; \
adcq %rcx, %r15 ; \
movq %r15, 56+P0 ; \
adcq %rcx, %rbx ; \
andq $0x1FF, %rbx ; \
movq %rbx, 64+P0
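// The tail of the macro above (from "movq %r9, %rax" onwards) is the
// usual p_521 proxy-carry trick, shared by all three cmsub macros:
// informally, ANDing the middle words together lets a single adc
// through that AND simulate carry propagation across any all-1s words,
// so one pass both folds the top bits back (2^521 == 1 mod p_521) and
// decides the final conditional increment, with the top word masked
// to 9 bits at the end.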
// P0 = 3 * P1 - 8 * P2 == 3 * P1 + 8 * (p_521 - P2)
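// Building on the complement trick noted above cmsub_p521,
// 8 * (p_521 - P2) is formed directly: each word of P2 is complemented
// and the whole 9-word value is shifted left 3 bits by the shldq chain.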
#define cmsub38_p521(P0,P1,P2) \
movq 64+P2, %rbx ; \
xorq $0x1FF, %rbx ; \
movq 56+P2, %r15 ; \
notq %r15; \
shldq $3, %r15, %rbx ; \
movq 48+P2, %r14 ; \
notq %r14; \
shldq $3, %r14, %r15 ; \
movq 40+P2, %r13 ; \
notq %r13; \
shldq $3, %r13, %r14 ; \
movq 32+P2, %r12 ; \
notq %r12; \
shldq $3, %r12, %r13 ; \
movq 24+P2, %r11 ; \
notq %r11; \
shldq $3, %r11, %r12 ; \
movq 16+P2, %r10 ; \
notq %r10; \
shldq $3, %r10, %r11 ; \
movq 8+P2, %r9 ; \
notq %r9; \
shldq $3, %r9, %r10 ; \
movq P2, %r8 ; \
notq %r8; \
shldq $3, %r8, %r9 ; \
shlq $3, %r8 ; \
movl $3, %ecx ; \
movq P1, %rax ; \
mulq %rcx; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
sbbq %rbp, %rbp ; \
movq 0x8+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
sbbq %rbp, %rbp ; \
movq 0x10+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
sbbq %rbp, %rbp ; \
movq 0x18+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
sbbq %rbp, %rbp ; \
movq 0x20+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
sbbq %rbp, %rbp ; \
movq 0x28+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
sbbq %rbp, %rbp ; \
movq 0x30+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
sbbq %rbp, %rbp ; \
movq 0x38+P1, %rax ; \
mulq %rcx; \
subq %rbp, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %rbx ; \
movq 0x40+P1, %rax ; \
imulq %rcx, %rax ; \
addq %rax, %rbx ; \
movq %r9, %rax ; \
andq %r10, %rax ; \
andq %r11, %rax ; \
andq %r12, %rax ; \
andq %r13, %rax ; \
andq %r14, %rax ; \
andq %r15, %rax ; \
movq %rbx, %rdx ; \
shrq $9, %rdx ; \
orq $~0x1FF, %rbx ; \
leaq 1(%rdx), %rcx ; \
addq %r8, %rcx ; \
movl $0, %ecx ; \
adcq %rcx, %rax ; \
movq %rbx, %rax ; \
adcq %rcx, %rax ; \
adcq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rcx, %r9 ; \
movq %r9, 8+P0 ; \
adcq %rcx, %r10 ; \
movq %r10, 16+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 24+P0 ; \
adcq %rcx, %r12 ; \
movq %r12, 32+P0 ; \
adcq %rcx, %r13 ; \
movq %r13, 40+P0 ; \
adcq %rcx, %r14 ; \
movq %r14, 48+P0 ; \
adcq %rcx, %r15 ; \
movq %r15, 56+P0 ; \
adcq %rcx, %rbx ; \
andq $0x1FF, %rbx ; \
movq %rbx, 64+P0
// P0 = 4 * P1 - P2 = 4 * P1 + (p_521 - P2)
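// Similarly, 4 * P1 comes from the shldq $2 chain, while (p_521 - P2)
// is added in as the bitwise complement of P2 (top word XORed with
// 0x1FF) through a single adcq chain.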
#define cmsub41_p521(P0,P1,P2) \
movq 64+P1, %rbx ; \
movq 56+P1, %r15 ; \
shldq $2, %r15, %rbx ; \
movq 48+P1, %r14 ; \
shldq $2, %r14, %r15 ; \
movq 40+P1, %r13 ; \
shldq $2, %r13, %r14 ; \
movq 32+P1, %r12 ; \
shldq $2, %r12, %r13 ; \
movq 24+P1, %r11 ; \
shldq $2, %r11, %r12 ; \
movq 16+P1, %r10 ; \
shldq $2, %r10, %r11 ; \
movq 8+P1, %r9 ; \
shldq $2, %r9, %r10 ; \
movq P1, %r8 ; \
shldq $2, %r8, %r9 ; \
shlq $2, %r8 ; \
movq 64+P2, %rcx ; \
xorq $0x1FF, %rcx ; \
movq P2, %rax ; \
notq %rax; \
addq %rax, %r8 ; \
movq 8+P2, %rax ; \
notq %rax; \
adcq %rax, %r9 ; \
movq 16+P2, %rax ; \
notq %rax; \
adcq %rax, %r10 ; \
movq 24+P2, %rax ; \
notq %rax; \
adcq %rax, %r11 ; \
movq 32+P2, %rax ; \
notq %rax; \
adcq %rax, %r12 ; \
movq 40+P2, %rax ; \
notq %rax; \
adcq %rax, %r13 ; \
movq 48+P2, %rax ; \
notq %rax; \
adcq %rax, %r14 ; \
movq 56+P2, %rax ; \
notq %rax; \
adcq %rax, %r15 ; \
adcq %rcx, %rbx ; \
movq %r9, %rax ; \
andq %r10, %rax ; \
andq %r11, %rax ; \
andq %r12, %rax ; \
andq %r13, %rax ; \
andq %r14, %rax ; \
andq %r15, %rax ; \
movq %rbx, %rdx ; \
shrq $9, %rdx ; \
orq $~0x1FF, %rbx ; \
leaq 1(%rdx), %rcx ; \
addq %r8, %rcx ; \
movl $0, %ecx ; \
adcq %rcx, %rax ; \
movq %rbx, %rax ; \
adcq %rcx, %rax ; \
adcq %rdx, %r8 ; \
movq %r8, P0 ; \
adcq %rcx, %r9 ; \
movq %r9, 8+P0 ; \
adcq %rcx, %r10 ; \
movq %r10, 16+P0 ; \
adcq %rcx, %r11 ; \
movq %r11, 24+P0 ; \
adcq %rcx, %r12 ; \
movq %r12, 32+P0 ; \
adcq %rcx, %r13 ; \
movq %r13, 40+P0 ; \
adcq %rcx, %r14 ; \
movq %r14, 48+P0 ; \
adcq %rcx, %r15 ; \
movq %r15, 56+P0 ; \
adcq %rcx, %rbx ; \
andq $0x1FF, %rbx ; \
movq %rbx, 64+P0
S2N_BN_SYMBOL(p521_jdouble_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Main code, just a sequence of basic field operations
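// For orientation, the schedule below computes (restating the inline
// comments as one pseudocode block):
//
//      z2 = z^2, y2 = y^2
//      x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
//      x4p = x2p^2, xy2 = x * y^2, y4 = y2^2
//      d = 12 * xy2 - 9 * x4p
//      x' = 4 * xy2 - d
//      y' = 3 * d * x2p - 8 * y4
//      z' = (y + z)^2 - z2 - y2 = 2 * y * z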
// z2 = z^2
// y2 = y^2
sqr_p521(z2,z_1)
sqr_p521(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
add_p521(t1,x_1,z2)
sub_p521(t2,x_1,z2)
mul_p521(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p521(t1,y_1,z_1)
sqr_p521(x4p,x2p)
weakmul_p521(xy2,x_1,y2)
// t2 = (y + z)^2
sqr_p521(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p521(d,12,xy2,9,x4p)
sub_p521(t1,t2,z2)
// y4 = y^4
sqr_p521(y4,y2)
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p521(z_3,t1,y2)
weakmul_p521(dx2,d,x2p)
// x' = 4 * xy2 - d
cmsub41_p521(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_p521(y_3,dx2,y4)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p521_jdouble_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_mul_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_mul_p521_alt(uint64_t z[static 9],
// const uint64_t x[static 9],
// const uint64_t y[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI: RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521_alt)
.text
#define z %rdi
#define x %rsi
// This is moved from %rdx to free it for muls
#define y %rcx
// Macro for the key "multiply and add to (c,h,l)" step
#define combadd(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A minutely shorter form for when c = 0 initially
#define combadz(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq c, c
// A short form where we don't expect a top carry
#define combads(h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h
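// In C terms, each of these macros is one step of a three-word
// accumulator update; a hypothetical sketch (not part of this file),
// assuming a compiler with unsigned __int128:
//
//      static inline void combadd(uint64_t *c, uint64_t *h, uint64_t *l,
//                                 uint64_t a, uint64_t b) {
//        unsigned __int128 p = (unsigned __int128)a * b;
//        unsigned __int128 s = (unsigned __int128)*l + (uint64_t)p;
//        *l = (uint64_t)s;
//        s = (unsigned __int128)*h + (uint64_t)(p >> 64) + (uint64_t)(s >> 64);
//        *h = (uint64_t)s;
//        *c += (uint64_t)(s >> 64);  // absent in combads; c starts 0 in combadz
//      }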
S2N_BN_SYMBOL(bignum_mul_p521_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Make more registers available and make temporary space on stack
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(72)
// Copy y into a safe register to start with
movq %rdx, y
// Start doing a conventional columnwise multiplication,
// temporarily storing the lower 9 digits to the stack.
// Start with result term 0
movq (x), %rax
mulq (y)
movq %rax, (%rsp)
movq %rdx, %r9
xorq %r10, %r10
// Result term 1
xorq %r11, %r11
combads(%r10,%r9,(x),8(y))
combadz(%r11,%r10,%r9,8(x),(y))
movq %r9, 8(%rsp)
// Result term 2
xorq %r12, %r12
combadz(%r12,%r11,%r10,(x),16(y))
combadd(%r12,%r11,%r10,8(x),8(y))
combadd(%r12,%r11,%r10,16(x),(y))
movq %r10, 16(%rsp)
// Result term 3
xorq %r13, %r13
combadz(%r13,%r12,%r11,(x),24(y))
combadd(%r13,%r12,%r11,8(x),16(y))
combadd(%r13,%r12,%r11,16(x),8(y))
combadd(%r13,%r12,%r11,24(x),(y))
movq %r11, 24(%rsp)
// Result term 4
xorq %r14, %r14
combadz(%r14,%r13,%r12,(x),32(y))
combadd(%r14,%r13,%r12,8(x),24(y))
combadd(%r14,%r13,%r12,16(x),16(y))
combadd(%r14,%r13,%r12,24(x),8(y))
combadd(%r14,%r13,%r12,32(x),(y))
movq %r12, 32(%rsp)
// Result term 5
xorq %r15, %r15
combadz(%r15,%r14,%r13,(x),40(y))
combadd(%r15,%r14,%r13,8(x),32(y))
combadd(%r15,%r14,%r13,16(x),24(y))
combadd(%r15,%r14,%r13,24(x),16(y))
combadd(%r15,%r14,%r13,32(x),8(y))
combadd(%r15,%r14,%r13,40(x),(y))
movq %r13, 40(%rsp)
// Result term 6
xorq %r8, %r8
combadz(%r8,%r15,%r14,(x),48(y))
combadd(%r8,%r15,%r14,8(x),40(y))
combadd(%r8,%r15,%r14,16(x),32(y))
combadd(%r8,%r15,%r14,24(x),24(y))
combadd(%r8,%r15,%r14,32(x),16(y))
combadd(%r8,%r15,%r14,40(x),8(y))
combadd(%r8,%r15,%r14,48(x),(y))
movq %r14, 48(%rsp)
// Result term 7
xorq %r9, %r9
combadz(%r9,%r8,%r15,(x),56(y))
combadd(%r9,%r8,%r15,8(x),48(y))
combadd(%r9,%r8,%r15,16(x),40(y))
combadd(%r9,%r8,%r15,24(x),32(y))
combadd(%r9,%r8,%r15,32(x),24(y))
combadd(%r9,%r8,%r15,40(x),16(y))
combadd(%r9,%r8,%r15,48(x),8(y))
combadd(%r9,%r8,%r15,56(x),(y))
movq %r15, 56(%rsp)
// Result term 8
xorq %r10, %r10
combadz(%r10,%r9,%r8,(x),64(y))
combadd(%r10,%r9,%r8,8(x),56(y))
combadd(%r10,%r9,%r8,16(x),48(y))
combadd(%r10,%r9,%r8,24(x),40(y))
combadd(%r10,%r9,%r8,32(x),32(y))
combadd(%r10,%r9,%r8,40(x),24(y))
combadd(%r10,%r9,%r8,48(x),16(y))
combadd(%r10,%r9,%r8,56(x),8(y))
combadd(%r10,%r9,%r8,64(x),(y))
movq %r8, 64(%rsp)
// At this point we suspend writing back results and collect them
// in a register window. Next is result term 9
xorq %r11, %r11
combadz(%r11,%r10,%r9,8(x),64(y))
combadd(%r11,%r10,%r9,16(x),56(y))
combadd(%r11,%r10,%r9,24(x),48(y))
combadd(%r11,%r10,%r9,32(x),40(y))
combadd(%r11,%r10,%r9,40(x),32(y))
combadd(%r11,%r10,%r9,48(x),24(y))
combadd(%r11,%r10,%r9,56(x),16(y))
combadd(%r11,%r10,%r9,64(x),8(y))
// Result term 10
xorq %r12, %r12
combadz(%r12,%r11,%r10,16(x),64(y))
combadd(%r12,%r11,%r10,24(x),56(y))
combadd(%r12,%r11,%r10,32(x),48(y))
combadd(%r12,%r11,%r10,40(x),40(y))
combadd(%r12,%r11,%r10,48(x),32(y))
combadd(%r12,%r11,%r10,56(x),24(y))
combadd(%r12,%r11,%r10,64(x),16(y))
// Result term 11
xorq %r13, %r13
combadz(%r13,%r12,%r11,24(x),64(y))
combadd(%r13,%r12,%r11,32(x),56(y))
combadd(%r13,%r12,%r11,40(x),48(y))
combadd(%r13,%r12,%r11,48(x),40(y))
combadd(%r13,%r12,%r11,56(x),32(y))
combadd(%r13,%r12,%r11,64(x),24(y))
// Result term 12
xorq %r14, %r14
combadz(%r14,%r13,%r12,32(x),64(y))
combadd(%r14,%r13,%r12,40(x),56(y))
combadd(%r14,%r13,%r12,48(x),48(y))
combadd(%r14,%r13,%r12,56(x),40(y))
combadd(%r14,%r13,%r12,64(x),32(y))
// Result term 13
xorq %r15, %r15
combadz(%r15,%r14,%r13,40(x),64(y))
combadd(%r15,%r14,%r13,48(x),56(y))
combadd(%r15,%r14,%r13,56(x),48(y))
combadd(%r15,%r14,%r13,64(x),40(y))
// Result term 14
xorq %r8, %r8
combadz(%r8,%r15,%r14,48(x),64(y))
combadd(%r8,%r15,%r14,56(x),56(y))
combadd(%r8,%r15,%r14,64(x),48(y))
// Result term 15
combads(%r8,%r15,56(x),64(y))
combads(%r8,%r15,64(x),56(y))
// Result term 16
movq 64(x), %rax
imulq 64(y), %rax
addq %r8, %rax
// Now the upper portion is [%rax;%r15;%r14;%r13;%r12;%r11;%r10;%r9;[%rsp+64]].
// Rotate the upper portion right 9 bits since 2^512 == 2^-9 (mod p_521)
// Let rotated result %rdx,%r15,%r14,...,%r8 be h (high) and %rsp[0..7] be l (low)
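// Concretely: if the full product is x = 2^512 * h + l, then from
// 2^521 == 1 (mod p_521) we get 2^512 == 2^-9, hence
//
//      x == (h >> 9) + 2^512 * (h & 0x1FF) + l  (mod p_521)
//
// which is exactly what the 9-bit right rotation computes.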
movq 64(%rsp), %r8
movq %r8, %rdx
andq $0x1FF, %rdx
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %r11, %r10
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r15, %r14
shrdq $9, %rax, %r15
shrq $9, %rax
addq %rax, %rdx
// Force carry-in then add to get s = h + l + 1
// but actually add all 1s in the top 55 bits to get simple carry out
stc
adcq (%rsp), %r8
adcq 8(%rsp), %r9
adcq 16(%rsp), %r10
adcq 24(%rsp), %r11
adcq 32(%rsp), %r12
adcq 40(%rsp), %r13
adcq 48(%rsp), %r14
adcq 56(%rsp), %r15
adcq $~0x1FF, %rdx
// Now CF is set <=> h + l + 1 >= 2^521 <=> h + l >= p_521,
// in which case the lower 521 bits are already right. Otherwise if
// CF is clear, we want to subtract 1. Hence subtract the complement
// of the carry flag then mask the top word, which scrubs the
// padding in either case. Write digits back as they are created.
cmc
sbbq $0, %r8
movq %r8, (z)
sbbq $0, %r9
movq %r9, 8(z)
sbbq $0, %r10
movq %r10, 16(z)
sbbq $0, %r11
movq %r11, 24(z)
sbbq $0, %r12
movq %r12, 32(z)
sbbq $0, %r13
movq %r13, 40(z)
sbbq $0, %r14
movq %r14, 48(z)
sbbq $0, %r15
movq %r15, 56(z)
sbbq $0, %rdx
andq $0x1FF, %rdx
movq %rdx, 64(z)
// Restore registers and return
CFI_INC_RSP(72)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mul_p521_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_tolebytes_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert 9-digit 528-bit bignum to little-endian bytes
//
// extern void bignum_tolebytes_p521(uint8_t z[static 66],
// const uint64_t x[static 9]);
//
// This assumes the input x is < 2^528 so that it fits in 66 bytes.
// In particular this holds if x < p_521 < 2^521 < 2^528.
//
// Since x86 is little-endian, this is just copying.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tolebytes_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tolebytes_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tolebytes_p521)
.text
#define z %rdi
#define x %rsi
#define a %rax
S2N_BN_SYMBOL(bignum_tolebytes_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
movq (x), a
movq a, (z)
movq 8(x), a
movq a, 8(z)
movq 16(x), a
movq a, 16(z)
movq 24(x), a
movq a, 24(z)
movq 32(x), a
movq a, 32(z)
movq 40(x), a
movq a, 40(z)
movq 48(x), a
movq a, 48(z)
movq 56(x), a
movq a, 56(z)
movq 64(x), a
movw %ax, 64(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_tolebytes_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_tomont_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^576 * x) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_tomont_p521(uint64_t z[static 9],
// const uint64_t x[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p521)
.text
#define z %rdi
#define x %rsi
#define d0 %rax
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11
#define d6 %rbx
#define d8 %rdx
#define d8short %edx
// Re-use the input pointer as another variable once it is safe to do so
#define d7 %rsi
S2N_BN_SYMBOL(bignum_tomont_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save one more register
CFI_PUSH(%rbx)
// Separate out the input into x = 2^521 * H + L, so that x mod p_521 =
// (H + L) mod p_521 = if H + L >= p_521 then H + L - p_521 else H + L.
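// A C-level sketch of the split (hypothetical, for orientation only):
//
//      uint64_t H = x[8] >> 9;          // bits 521 and up
//      uint64_t Ltop = x[8] & 0x1FF;    // bits 512..520 stay in L
//      // s = H + L + 1 is then formed by the stc + adcq chain below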
movq 64(x), d0
movl $0x1FF, d8short
andq d0, d8
shrq $9, d0
// Force carry-in to get s = [d8;d7;d6;d5;d4;d3;d2;d1;d0] = H + L + 1.
stc
adcq (x), d0
movq 8(x), d1
adcq $0, d1
movq 16(x), d2
adcq $0, d2
movq 24(x), d3
adcq $0, d3
movq 32(x), d4
adcq $0, d4
movq 40(x), d5
adcq $0, d5
movq 48(x), d6
adcq $0, d6
movq 56(x), d7
adcq $0, d7
adcq $0, d8
// Set CF <=> s < 2^521 <=> H + L < p_521, so that if CF is set
// we want H + L = s - 1, otherwise (H + L) - p_521 = s - 2^521.
// This is done with just s - CF then masking to 521 bits.
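// ("cmpq $512, d8" sets CF exactly when the top digit is below 2^9,
// i.e. when s < 2^521.)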
cmpq $512, d8
sbbq $0, d0
sbbq $0, d1
sbbq $0, d2
sbbq $0, d3
sbbq $0, d4
sbbq $0, d5
sbbq $0, d6
sbbq $0, d7
sbbq $0, d8
// So far, this is just a modular reduction as in bignum_mod_p521_9,
// except that the final masking of d8 is skipped since that comes out
// in the wash anyway from the next block, which is the Montgomery map,
// multiplying by 2^576 modulo p_521. Because 2^521 == 1 (mod p_521)
// this is just rotation left by 576 - 521 = 55 bits. Store back
// digits as created, though in a slightly peculiar order because we
// want to avoid using another register.
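// Worked congruence: 2^576 * x == 2^(576-521) * x == 2^55 * x
// (mod p_521), and multiplying a reduced 521-bit value by 2^55 modulo
// 2^521 - 1 is a 55-bit left rotation of its 521-bit representation,
// which is what the shldq chain implements.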
shldq $55, d7, d8
shldq $55, d6, d7
movq d7, 56(z)
shldq $55, d5, d6
movq d6, 48(z)
shldq $55, d4, d5
movq d5, 40(z)
shldq $55, d3, d4
movq d4, 32(z)
shldq $55, d2, d3
movq d3, 24(z)
shldq $55, d1, d2
movq d2, 16(z)
shldq $55, d0, d1
movq d1, 8(z)
shldq $55, d8, d0
movq d0, (z)
andq $0x1FF, d8
movq d8, 64(z)
// Restore register
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_neg_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Negate modulo p_521, z := (-x) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_neg_p521(uint64_t z[static 9], const uint64_t x[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p521)
.text
#define z %rdi
#define x %rsi
#define p %rax
#define d0 %rcx
#define d1 %rdx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11
S2N_BN_SYMBOL(bignum_neg_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load most inputs (into the limited registers) and OR all of them to get p
movq (x), d0
movq d0, p
movq 8(x), d1
orq d1, p
movq 16(x), d2
orq d2, p
movq 24(x), d3
orq d3, p
movq 32(x), d4
orq d4, p
movq 40(x), d5
orq d5, p
orq 48(x), p
orq 56(x), p
orq 64(x), p
// Turn p into a bitmask for "input is nonzero", so that we avoid doing
// -0 = p_521 and hence maintain strict modular reduction
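// In C terms this is the branch-free mask = (x == 0) ? 0 : ~0ull:
// negq sets CF exactly when p is nonzero, and sbbq p, p then gives
// p = -CF.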
negq p
sbbq p, p
// Since p_521 is all 1s, the subtraction is just an exclusive-or with p
// to give an optional inversion, with a slight fiddle for the top digit.
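// Concretely, for 0 < x < p_521 we have p_521 - x == p_521 XOR x:
// the low eight words become ~x[i] and the top word becomes
// x[8] ^ 0x1FF.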
xorq p, d0
movq d0, (z)
xorq p, d1
movq d1, 8(z)
xorq p, d2
movq d2, 16(z)
xorq p, d3
movq d3, 24(z)
xorq p, d4
movq d4, 32(z)
xorq p, d5
movq d5, 40(z)
movq 48(x), d0
xorq p, d0
movq d0, 48(z)
movq 56(x), d1
xorq p, d1
movq d1, 56(z)
movq 64(x), d2
andq $0x1FF, p
xorq p, d2
movq d2, 64(z)
// Return
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_neg_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_inv_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Modular inverse modulo p_521 = 2^521 - 1
// Input x[9]; output z[9]
//
// extern void bignum_inv_p521(uint64_t z[static 9],const uint64_t x[static 9]);
//
// Assuming the 9-digit input x is coprime to p_521, i.e. is not divisible
// by it, returns z < p_521 such that x * z == 1 (mod p_521). Note that
// x does not need to be reduced modulo p_521, but the output always is.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_inv_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_p521)
.text
// Size in bytes of a 64-bit word
#define N 8
// Pointer-offset pairs for temporaries on stack
#define f 0(%rsp)
#define g (9*N)(%rsp)
#define u (18*N)(%rsp)
#define v (27*N)(%rsp)
#define tmp (36*N)(%rsp)
#define tmp2 (37*N)(%rsp)
#define i (38*N)(%rsp)
#define d (39*N)(%rsp)
#define mat (40*N)(%rsp)
// Backup for the output pointer
#define res (44*N)(%rsp)
// Total size to reserve on the stack
#define NSPACE 45*N
// Syntactic variants to make x86_att version simpler to generate
#define F 0
#define G (9*N)
#define U (18*N)
#define V (27*N)
#define MAT (40*N)
#define ff (%rsp)
#define gg (9*N)(%rsp)
// Very similar to a subroutine call to the s2n-bignum word_divstep59.
// But different in register usage and returning the final matrix as
//
// [ %r8 %r10]
// [ %r12 %r14]
//
// and also returning the matrix still negated (which doesn't matter)
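// Informally, each unrolled block below is one branch-free iteration
// of a Bernstein-Yang style divstep on packed approximations of
// (delta, f, g); the recurrence being realized with cmov and
// sign/parity tests is
//
//      divstep(delta, f, g) =
//        (1 - delta, g, (g - f) / 2)             if delta > 0, g odd
//        (1 + delta, f, (g + (g & 1) * f) / 2)   otherwise
//
// with the per-iteration transition coefficients accumulated into the
// returned matrix.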
#define divstep59(din,fin,gin) \
movq din, %rsi ; \
movq fin, %rdx ; \
movq gin, %rcx ; \
movq %rdx, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq $0xfffffffffffffffe, %rax ; \
xorl %ebp, %ebp ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %rdx ; \
leaq (%rcx,%rax), %rdi ; \
shlq $0x16, %rdx ; \
shlq $0x16, %rdi ; \
sarq $0x2b, %rdx ; \
sarq $0x2b, %rdi ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %rbx ; \
leaq (%rcx,%rax), %rcx ; \
sarq $0x2a, %rbx ; \
sarq $0x2a, %rcx ; \
movq %rdx, MAT(%rsp) ; \
movq %rbx, MAT+0x8(%rsp) ; \
movq %rdi, MAT+0x10(%rsp) ; \
movq %rcx, MAT+0x18(%rsp) ; \
movq fin, %r12 ; \
imulq %r12, %rdi ; \
imulq %rdx, %r12 ; \
movq gin, %r13 ; \
imulq %r13, %rbx ; \
imulq %rcx, %r13 ; \
addq %rbx, %r12 ; \
addq %rdi, %r13 ; \
sarq $0x14, %r12 ; \
sarq $0x14, %r13 ; \
movq %r12, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
movq %r13, %rcx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq $0xfffffffffffffffe, %rax ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %r8 ; \
leaq (%rcx,%rax), %r10 ; \
shlq $0x16, %r8 ; \
shlq $0x16, %r10 ; \
sarq $0x2b, %r8 ; \
sarq $0x2b, %r10 ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %r15 ; \
leaq (%rcx,%rax), %r11 ; \
sarq $0x2a, %r15 ; \
sarq $0x2a, %r11 ; \
movq %r13, %rbx ; \
movq %r12, %rcx ; \
imulq %r8, %r12 ; \
imulq %r15, %rbx ; \
addq %rbx, %r12 ; \
imulq %r11, %r13 ; \
imulq %r10, %rcx ; \
addq %rcx, %r13 ; \
sarq $0x14, %r12 ; \
sarq $0x14, %r13 ; \
movq %r12, %rbx ; \
andq $0xfffff, %rbx ; \
movabsq $0xfffffe0000000000, %rax ; \
orq %rax, %rbx ; \
movq %r13, %rcx ; \
andq $0xfffff, %rcx ; \
movabsq $0xc000000000000000, %rax ; \
orq %rax, %rcx ; \
movq MAT(%rsp), %rax ; \
imulq %r8, %rax ; \
movq MAT+0x10(%rsp), %rdx ; \
imulq %r15, %rdx ; \
imulq MAT+0x8(%rsp), %r8 ; \
imulq MAT+0x18(%rsp), %r15 ; \
addq %r8, %r15 ; \
leaq (%rax,%rdx), %r9 ; \
movq MAT(%rsp), %rax ; \
imulq %r10, %rax ; \
movq MAT+0x10(%rsp), %rdx ; \
imulq %r11, %rdx ; \
imulq MAT+0x8(%rsp), %r10 ; \
imulq MAT+0x18(%rsp), %r11 ; \
addq %r10, %r11 ; \
leaq (%rax,%rdx), %r13 ; \
movq $0xfffffffffffffffe, %rax ; \
movl $0x2, %edx ; \
movq %rbx, %rdi ; \
movq %rax, %r8 ; \
testq %rsi, %rsi ; \
cmovs %rbp, %r8 ; \
testq $0x1, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
cmovs %rbp, %r8 ; \
movq %rbx, %rdi ; \
testq %rdx, %rcx ; \
cmoveq %rbp, %r8 ; \
cmoveq %rbp, %rdi ; \
sarq $1, %rcx ; \
xorq %r8, %rdi ; \
xorq %r8, %rsi ; \
btq $0x3f, %r8 ; \
cmovbq %rcx, %rbx ; \
movq %rax, %r8 ; \
subq %rax, %rsi ; \
leaq (%rcx,%rdi), %rcx ; \
sarq $1, %rcx ; \
movl $0x100000, %eax ; \
leaq (%rbx,%rax), %r8 ; \
leaq (%rcx,%rax), %r12 ; \
shlq $0x15, %r8 ; \
shlq $0x15, %r12 ; \
sarq $0x2b, %r8 ; \
sarq $0x2b, %r12 ; \
movabsq $0x20000100000, %rax ; \
leaq (%rbx,%rax), %r10 ; \
leaq (%rcx,%rax), %r14 ; \
sarq $0x2b, %r10 ; \
sarq $0x2b, %r14 ; \
movq %r9, %rax ; \
imulq %r8, %rax ; \
movq %r13, %rdx ; \
imulq %r10, %rdx ; \
imulq %r15, %r8 ; \
imulq %r11, %r10 ; \
addq %r8, %r10 ; \
leaq (%rax,%rdx), %r8 ; \
movq %r9, %rax ; \
imulq %r12, %rax ; \
movq %r13, %rdx ; \
imulq %r14, %rdx ; \
imulq %r15, %r12 ; \
imulq %r11, %r14 ; \
addq %r12, %r14 ; \
leaq (%rax,%rdx), %r12
S2N_BN_SYMBOL(bignum_inv_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save registers and make room for temporaries
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Save the result pointer for the end so we can overwrite %rdi later
movq %rdi, res
// Copy the prime p_521 = 2^521 - 1 into the f variable
xorl %eax, %eax
notq %rax
movq %rax, F(%rsp)
movq %rax, F+8(%rsp)
movq %rax, F+16(%rsp)
movq %rax, F+24(%rsp)
movq %rax, F+32(%rsp)
movq %rax, F+40(%rsp)
movq %rax, F+48(%rsp)
movq %rax, F+56(%rsp)
movl $0x1FF, %eax
movq %rax, F+64(%rsp)
// Copy the input into the g variable, but reduce it strictly mod p_521
// so that g <= f as assumed in the bound proof. This code fragment is
// very similar to bignum_mod_p521_9.
movq 64(%rsi), %r8
movl $0x1FF, %ebx
andq %r8, %rbx
shrq $9, %r8
stc
adcq (%rsi), %r8
movq 8(%rsi), %r9
adcq $0, %r9
movq 16(%rsi), %r10
adcq $0, %r10
movq 24(%rsi), %r11
adcq $0, %r11
movq 32(%rsi), %r12
adcq $0, %r12
movq 40(%rsi), %r13
adcq $0, %r13
movq 48(%rsi), %r14
adcq $0, %r14
movq 56(%rsi), %r15
adcq $0, %r15
adcq $0, %rbx
cmpq $512, %rbx
sbbq $0, %r8
movq %r8, G(%rsp)
sbbq $0, %r9
movq %r9, G+8(%rsp)
sbbq $0, %r10
movq %r10, G+16(%rsp)
sbbq $0, %r11
movq %r11, G+24(%rsp)
sbbq $0, %r12
movq %r12, G+32(%rsp)
sbbq $0, %r13
movq %r13, G+40(%rsp)
sbbq $0, %r14
movq %r14, G+48(%rsp)
sbbq $0, %r15
movq %r15, G+56(%rsp)
sbbq $0, %rbx
andq $0x1FF, %rbx
movq %rbx, G+64(%rsp)
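// As a hedged illustration (ours, not part of the build): the same reduction
// pattern at word size, with p = 2^21 - 1 standing in for p_521 and assuming
// x < 2^41 so that a single correction suffices.
//
//     #include <stdint.h>
//     static uint64_t mod_p21(uint64_t x) {
//         uint64_t h = x >> 21, t = x & 0x1FFFFF;  // x = 2^21 * h + t
//         uint64_t s = t + h + 1;                  // +1 makes bit 21 mean "t + h >= p"
//         return (s >> 21) ? (s & 0x1FFFFF)        // t + h >= p: drop 2^21, i.e. subtract p
//                          : s - 1;                // t + h < p: just undo the +1
//     }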
// Also maintain weakly reduced < 2*p_521 vector [u,v] such that
// [f,g] == x * 2^{1239-59*i} * [u,v] (mod p_521)
// starting with [p_521,x] == x * 2^{1239-59*0} * [0,2^-1239] (mod p_521)
// Note that because (2^{a+521} == 2^a) (mod p_521) we simply have
// (2^-1239 == 2^324) (mod p_521) so the constant initializer is simple.
//
// Based on the standard divstep bound, for inputs <= 2^b we need at least
// n >= (9437 * b + 1) / 4096. Since b is 521, that means 1201 iterations.
// Since we package divstep in multiples of 59 bits, we do 21 blocks of 59
// making *1239* total. (With a bit more effort we could avoid the full 59
// divsteps and use a shorter tail computation, but we keep it simple.)
// Hence, after the 21st iteration we have [f,g] == x * [u,v] and since
// |f| = 1 we get the modular inverse from u by flipping its sign with f.
xorl %eax, %eax
movq %rax, U(%rsp)
movq %rax, U+8(%rsp)
movq %rax, U+16(%rsp)
movq %rax, U+24(%rsp)
movq %rax, U+32(%rsp)
movq %rax, U+40(%rsp)
movq %rax, U+48(%rsp)
movq %rax, U+56(%rsp)
movq %rax, U+64(%rsp)
movl $16, %ebx
movq %rax, V(%rsp)
movq %rax, V+8(%rsp)
movq %rax, V+16(%rsp)
movq %rax, V+24(%rsp)
movq %rax, V+32(%rsp)
movq %rbx, V+40(%rsp)
movq %rax, V+48(%rsp)
movq %rax, V+56(%rsp)
movq %rax, V+64(%rsp)
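// The constants above can be checked mechanically; a hedged C snippet
// (illustrative only, not part of the build):
//
//     #include <assert.h>
//     int main(void) {
//         assert((9437 * 521 + 1 + 4095) / 4096 == 1201); // divstep bound, rounded up
//         assert(21 * 59 == 1239);                        // 21 blocks of 59 suffice
//         assert(((-1239 % 521) + 521) % 521 == 324);     // 2^-1239 == 2^324 (mod p_521)
//         return 0;
//     }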
// Start of main loop. We jump into the middle so that the divstep
// portion is common to the special 21st iteration after a uniform
// first 20.
movq $21, i
movq $1, d
jmp Lbignum_inv_p521_midloop
Lbignum_inv_p521_loop:
// Separate out the matrix into sign-magnitude pairs
movq %r8, %r9
sarq $63, %r9
xorq %r9, %r8
subq %r9, %r8
movq %r10, %r11
sarq $63, %r11
xorq %r11, %r10
subq %r11, %r10
movq %r12, %r13
sarq $63, %r13
xorq %r13, %r12
subq %r13, %r12
movq %r14, %r15
sarq $63, %r15
xorq %r15, %r14
subq %r15, %r14
// Adjust the initial values to allow for complement instead of negation
// This initial offset is the same for [f,g] and [u,v] compositions.
// Save it in temporary storage for the [u,v] part and do [f,g] first.
movq %r8, %rax
andq %r9, %rax
movq %r10, %rdi
andq %r11, %rdi
addq %rax, %rdi
movq %rdi, tmp
movq %r12, %rax
andq %r13, %rax
movq %r14, %rsi
andq %r15, %rsi
addq %rax, %rsi
movq %rsi, tmp2
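// One-limb sanity check of the complement trick (hedged, illustrative):
// negation is bitwise complement plus one, so for a matrix entry with sign
// mask s and magnitude m, the signed product with f is m*(f ^ s) + (m & s)
// modulo 2^64, which is why the (magnitude & sign) terms seed the sums.
//
//     #include <stdint.h>
//     #include <assert.h>
//     int main(void) {
//         int64_t ms = -5, f = 7;
//         uint64_t s = (uint64_t)(ms >> 63);        // all-ones sign mask
//         uint64_t m = ((uint64_t)ms ^ s) - s;      // magnitude |ms| = 5
//         assert(m * ((uint64_t)f ^ s) + (m & s) == (uint64_t)(ms * f));
//         return 0;
//     }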
// Now the computation of the updated f and g values. This maintains a
// 2-word carry between stages so we can conveniently insert the shift
// right by 59 before storing back, and not overwrite digits we need
// again of the old f and g values.
//
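// (For reference: each shrdq $59 below stores (lo >> 59) | (hi << 5) for an
// adjacent accumulator pair lo,hi, i.e. the 59-bit right shift of the double
// word, without losing the high part that is still being accumulated.)
//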
// Digit 0 of [f,g]
xorl %ebx, %ebx
movq F(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rdi
adcq %rdx, %rbx
movq G(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rdi
adcq %rdx, %rbx
xorl %ebp, %ebp
movq F(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rsi
adcq %rdx, %rbp
movq G(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
// Digit 1 of [f,g]
xorl %ecx, %ecx
movq F+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq G+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
shrdq $59, %rbx, %rdi
movq %rdi, F(%rsp)
xorl %edi, %edi
movq F+N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rbp
adcq %rdx, %rdi
movq G+N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rdi
shrdq $59, %rbp, %rsi
movq %rsi, G(%rsp)
// Digit 2 of [f,g]
xorl %esi, %esi
movq F+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rsi
movq G+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rsi
shrdq $59, %rcx, %rbx
movq %rbx, F+N(%rsp)
xorl %ebx, %ebx
movq F+2*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rdi
adcq %rdx, %rbx
movq G+2*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rdi
adcq %rdx, %rbx
shrdq $59, %rdi, %rbp
movq %rbp, G+N(%rsp)
// Digit 3 of [f,g]
xorl %ebp, %ebp
movq F+3*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rsi
adcq %rdx, %rbp
movq G+3*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rsi
adcq %rdx, %rbp
shrdq $59, %rsi, %rcx
movq %rcx, F+2*N(%rsp)
xorl %ecx, %ecx
movq F+3*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rbx
adcq %rdx, %rcx
movq G+3*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbx
adcq %rdx, %rcx
shrdq $59, %rbx, %rdi
movq %rdi, G+2*N(%rsp)
// Digit 4 of [f,g]
xorl %edi, %edi
movq F+4*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbp
adcq %rdx, %rdi
movq G+4*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbp
adcq %rdx, %rdi
shrdq $59, %rbp, %rsi
movq %rsi, F+3*N(%rsp)
xorl %esi, %esi
movq F+4*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rcx
adcq %rdx, %rsi
movq G+4*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rcx
adcq %rdx, %rsi
shrdq $59, %rcx, %rbx
movq %rbx, G+3*N(%rsp)
// Digit 5 of [f,g]
xorl %ebx, %ebx
movq F+5*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rdi
adcq %rdx, %rbx
movq G+5*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rdi
adcq %rdx, %rbx
shrdq $59, %rdi, %rbp
movq %rbp, F+4*N(%rsp)
xorl %ebp, %ebp
movq F+5*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rsi
adcq %rdx, %rbp
movq G+5*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
shrdq $59, %rsi, %rcx
movq %rcx, G+4*N(%rsp)
// Digit 6 of [f,g]
xorl %ecx, %ecx
movq F+6*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq G+6*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
shrdq $59, %rbx, %rdi
movq %rdi, F+5*N(%rsp)
xorl %edi, %edi
movq F+6*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rbp
adcq %rdx, %rdi
movq G+6*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rdi
shrdq $59, %rbp, %rsi
movq %rsi, G+5*N(%rsp)
// Digit 7 of [f,g]
xorl %esi, %esi
movq F+7*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rsi
movq G+7*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rsi
shrdq $59, %rcx, %rbx
movq %rbx, F+6*N(%rsp)
xorl %ebx, %ebx
movq F+7*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
addq %rax, %rdi
adcq %rdx, %rbx
movq G+7*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rdi
adcq %rdx, %rbx
shrdq $59, %rdi, %rbp
movq %rbp, G+6*N(%rsp)
// Digits 8 and 9 of [f,g]
movq F+8*N(%rsp), %rax
xorq %r9, %rax
movq %rax, %rbp
sarq $63, %rbp
andq %r8, %rbp
negq %rbp
mulq %r8
addq %rax, %rsi
adcq %rdx, %rbp
movq G+8*N(%rsp), %rax
xorq %r11, %rax
movq %rax, %rdx
sarq $63, %rdx
andq %r10, %rdx
subq %rdx, %rbp
mulq %r10
addq %rax, %rsi
adcq %rdx, %rbp
shrdq $59, %rsi, %rcx
movq %rcx, F+7*N(%rsp)
shrdq $59, %rbp, %rsi
movq F+8*N(%rsp), %rax
movq %rsi, F+8*N(%rsp)
xorq %r13, %rax
movq %rax, %rsi
sarq $63, %rsi
andq %r12, %rsi
negq %rsi
mulq %r12
addq %rax, %rbx
adcq %rdx, %rsi
movq G+8*N(%rsp), %rax
xorq %r15, %rax
movq %rax, %rdx
sarq $63, %rdx
andq %r14, %rdx
subq %rdx, %rsi
mulq %r14
addq %rax, %rbx
adcq %rdx, %rsi
shrdq $59, %rbx, %rdi
movq %rdi, G+7*N(%rsp)
shrdq $59, %rsi, %rbx
movq %rbx, G+8*N(%rsp)
// Get the initial carries back from storage and do the [u,v] accumulation
movq tmp, %rbx
movq tmp2, %rbp
// Digit 0 of [u,v]
xorl %ecx, %ecx
movq U(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V(%rsp)
// Digit 1 of [u,v]
xorl %ebx, %ebx
movq U+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rbx
xorl %ebp, %ebp
movq U+N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rcx, U+N(%rsp)
addq %rax, %rsi
adcq %rdx, %rbp
movq V+N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
movq %rsi, V+N(%rsp)
// Digit 2 of [u,v]
xorl %ecx, %ecx
movq U+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U+2*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U+2*N(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V+2*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V+2*N(%rsp)
// Digit 3 of [u,v]
xorl %ebx, %ebx
movq U+3*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+3*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rbx
xorl %ebp, %ebp
movq U+3*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rcx, U+3*N(%rsp)
addq %rax, %rsi
adcq %rdx, %rbp
movq V+3*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
movq %rsi, V+3*N(%rsp)
// Digit 4 of [u,v]
xorl %ecx, %ecx
movq U+4*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+4*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U+4*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U+4*N(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V+4*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V+4*N(%rsp)
// Digit 5 of [u,v]
xorl %ebx, %ebx
movq U+5*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+5*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rbx
xorl %ebp, %ebp
movq U+5*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rcx, U+5*N(%rsp)
addq %rax, %rsi
adcq %rdx, %rbp
movq V+5*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
movq %rsi, V+5*N(%rsp)
// Digit 6 of [u,v]
xorl %ecx, %ecx
movq U+6*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+6*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
adcq %rdx, %rcx
xorl %esi, %esi
movq U+6*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rbx, U+6*N(%rsp)
addq %rax, %rbp
adcq %rdx, %rsi
movq V+6*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rbp
adcq %rdx, %rsi
movq %rbp, V+6*N(%rsp)
// Digit 7 of [u,v]
xorl %ebx, %ebx
movq U+7*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+7*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
adcq %rdx, %rbx
xorl %ebp, %ebp
movq U+7*N(%rsp), %rax
xorq %r13, %rax
mulq %r12
movq %rcx, U+7*N(%rsp)
addq %rax, %rsi
adcq %rdx, %rbp
movq V+7*N(%rsp), %rax
xorq %r15, %rax
mulq %r14
addq %rax, %rsi
adcq %rdx, %rbp
movq %rsi, V+7*N(%rsp)
// Digits 8 and 9 of u (top is unsigned)
movq U+8*N(%rsp), %rax
xorq %r9, %rax
movq %r9, %rcx
andq %r8, %rcx
negq %rcx
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+8*N(%rsp), %rax
xorq %r11, %rax
movq %r11, %rdx
andq %r10, %rdx
subq %rdx, %rcx
mulq %r10
addq %rax, %rbx
adcq %rcx, %rdx
// Modular reduction of u
movq %rdx, %rax
shldq $55, %rbx, %rdx
sarq $63, %rax
addq %rax, %rdx
movq %rdx, %rax
shlq $9, %rdx
subq %rdx, %rbx
movq %rax, %rdx
sarq $63, %rax
movq U(%rsp), %rcx
addq %rdx, %rcx
movq %rcx, U(%rsp)
movq U+N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+N(%rsp)
movq U+2*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+2*N(%rsp)
movq U+3*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+3*N(%rsp)
movq U+4*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+4*N(%rsp)
movq U+5*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+5*N(%rsp)
movq U+6*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+6*N(%rsp)
movq U+7*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+7*N(%rsp)
adcq %rax, %rbx
// Preload for last use of old u digit 8
movq U+8*N(%rsp), %rax
movq %rbx, U+8*N(%rsp)
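// The reduction just performed uses 2^521 == 1 (mod p_521): split the signed
// accumulator as 2^521*c + r and fold c back in. A hedged word-sized model
// with p = 2^21 - 1 in place of p_521 (illustrative only):
//
//     #include <stdint.h>
//     static int64_t fold21(int64_t t) {
//         int64_t c = t >> 21;               // signed high part (floor division)
//         return (t & 0x1FFFFF) + c;         // r + c == t (mod 2^21 - 1)
//     }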
// Digits 8 and 9 of v (top is unsigned)
xorq %r13, %rax
movq %r13, %rbx
andq %r12, %rbx
negq %rbx
mulq %r12
addq %rax, %rbp
adcq %rdx, %rbx
movq V+8*N(%rsp), %rax
xorq %r15, %rax
movq %r15, %rdx
andq %r14, %rdx
subq %rdx, %rbx
mulq %r14
addq %rax, %rbp
adcq %rbx, %rdx
// Modular reduction of v
movq %rdx, %rax
shldq $55, %rbp, %rdx
sarq $63, %rax
addq %rax, %rdx
movq %rdx, %rax
shlq $9, %rdx
subq %rdx, %rbp
movq %rax, %rdx
sarq $63, %rax
movq V(%rsp), %rcx
addq %rdx, %rcx
movq %rcx, V(%rsp)
movq V+N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, V+N(%rsp)
movq V+2*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, V+2*N(%rsp)
movq V+3*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, V+3*N(%rsp)
movq V+4*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, V+4*N(%rsp)
movq V+5*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, V+5*N(%rsp)
movq V+6*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, V+6*N(%rsp)
movq V+7*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, V+7*N(%rsp)
adcq %rax, %rbp
movq %rbp, V+8*N(%rsp)
Lbignum_inv_p521_midloop:
divstep59(d,ff,gg)
movq %rsi, d
// Next iteration
decq i
jnz Lbignum_inv_p521_loop
// The 21st and last iteration does not need anything except the
// u value and the sign of f; the latter can be obtained from the
// lowest word of f. So it's done differently from the main loop.
// Find the sign of the new f. For this we just need one digit
// since we know (for in-scope cases) that f is either +1 or -1.
// We don't explicitly shift right by 59 either, but looking at
// bit 63 (or any bit >= 60) of the unshifted result is enough
// to distinguish -1 from +1; this is then made into a mask.
movq F(%rsp), %rax
movq G(%rsp), %rcx
imulq %r8, %rax
imulq %r10, %rcx
addq %rcx, %rax
sarq $63, %rax
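// In C terms this probe is a multiply-accumulate and one arithmetic shift
// (hedged model; m00/m01 for the matrix entries and f0/g0 for the low
// digits are names of ours, not of this file):
//
//     int64_t sign_mask = (int64_t)(m00 * f0 + m01 * g0) >> 63;  // 0 or ~0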
// Now separate out the matrix into sign-magnitude pairs
// and adjust each one based on the sign of f.
//
// Note that at this point we expect |f|=1 and we got its
// sign above, so then since [f,0] == x * [u,v] (mod p_521)
// we want to flip the sign of u according to that of f.
movq %r8, %r9
sarq $63, %r9
xorq %r9, %r8
subq %r9, %r8
xorq %rax, %r9
movq %r10, %r11
sarq $63, %r11
xorq %r11, %r10
subq %r11, %r10
xorq %rax, %r11
movq %r12, %r13
sarq $63, %r13
xorq %r13, %r12
subq %r13, %r12
xorq %rax, %r13
movq %r14, %r15
sarq $63, %r15
xorq %r15, %r14
subq %r15, %r14
xorq %rax, %r15
// Adjust the initial value to allow for complement instead of negation
movq %r8, %rax
andq %r9, %rax
movq %r10, %rbx
andq %r11, %rbx
addq %rax, %rbx
// Digit 0 of u
xorl %ecx, %ecx
movq U(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
movq %rbx, U(%rsp)
adcq %rdx, %rcx
// Digit 1 of u
xorl %ebx, %ebx
movq U+N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
movq %rcx, U+N(%rsp)
adcq %rdx, %rbx
// Digit 2 of u
xorl %ecx, %ecx
movq U+2*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+2*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
movq %rbx, U+2*N(%rsp)
adcq %rdx, %rcx
// Digit 3 of u
xorl %ebx, %ebx
movq U+3*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+3*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
movq %rcx, U+3*N(%rsp)
adcq %rdx, %rbx
// Digit 4 of u
xorl %ecx, %ecx
movq U+4*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+4*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
movq %rbx, U+4*N(%rsp)
adcq %rdx, %rcx
// Digit 5 of u
xorl %ebx, %ebx
movq U+5*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+5*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
movq %rcx, U+5*N(%rsp)
adcq %rdx, %rbx
// Digit 6 of u
xorl %ecx, %ecx
movq U+6*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+6*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rbx
movq %rbx, U+6*N(%rsp)
adcq %rdx, %rcx
// Digit 7 of u
xorl %ebx, %ebx
movq U+7*N(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %rcx
adcq %rdx, %rbx
movq V+7*N(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %rcx
movq %rcx, U+7*N(%rsp)
adcq %rdx, %rbx
// Digits 8 and 9 of u (top is unsigned)
movq U+8*N(%rsp), %rax
xorq %r9, %rax
movq %r9, %rcx
andq %r8, %rcx
negq %rcx
mulq %r8
addq %rax, %rbx
adcq %rdx, %rcx
movq V+8*N(%rsp), %rax
xorq %r11, %rax
movq %r11, %rdx
andq %r10, %rdx
subq %rdx, %rcx
mulq %r10
addq %rax, %rbx
adcq %rcx, %rdx
// Modular reduction of u
movq %rdx, %rax
shldq $55, %rbx, %rdx
sarq $63, %rax
addq %rax, %rdx
movq %rdx, %rax
shlq $9, %rdx
subq %rdx, %rbx
movq %rax, %rdx
sarq $63, %rax
movq U(%rsp), %rcx
addq %rdx, %rcx
movq %rcx, U(%rsp)
movq U+N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+N(%rsp)
movq U+2*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+2*N(%rsp)
movq U+3*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+3*N(%rsp)
movq U+4*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+4*N(%rsp)
movq U+5*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+5*N(%rsp)
movq U+6*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+6*N(%rsp)
movq U+7*N(%rsp), %rcx
adcq %rax, %rcx
movq %rcx, U+7*N(%rsp)
adcq %rax, %rbx
movq %rbx, U+8*N(%rsp)
// Further strict reduction ready for the output, which just means
// a conditional subtraction of p_521
xorl %eax, %eax
notq %rax
movq U(%rsp), %r8
subq %rax, %r8
movq U+N(%rsp), %r9
sbbq %rax, %r9
movq U+2*N(%rsp), %r10
sbbq %rax, %r10
movq U+3*N(%rsp), %r11
sbbq %rax, %r11
movq U+4*N(%rsp), %r12
sbbq %rax, %r12
movq U+5*N(%rsp), %r13
sbbq %rax, %r13
movq U+6*N(%rsp), %r14
sbbq %rax, %r14
movq U+7*N(%rsp), %r15
sbbq %rax, %r15
movl $0x1FF, %eax
movq U+8*N(%rsp), %rbp
sbbq %rax, %rbp
cmovcq U(%rsp), %r8
cmovcq U+N(%rsp), %r9
cmovcq U+2*N(%rsp), %r10
cmovcq U+3*N(%rsp), %r11
cmovcq U+4*N(%rsp), %r12
cmovcq U+5*N(%rsp), %r13
cmovcq U+6*N(%rsp), %r14
cmovcq U+7*N(%rsp), %r15
cmovcq U+8*N(%rsp), %rbp
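// Equivalent hedged C for this conditional subtraction (a plain branch for
// clarity, unlike the branch-free sbb/cmovc sequence above):
//
//     #include <stdint.h>
//     static void csub_p521(uint64_t u[9]) {
//         uint64_t t[9]; unsigned __int128 d; uint64_t b = 0;
//         for (int i = 0; i < 9; i++) {
//             uint64_t p = (i == 8) ? 0x1FF : ~(uint64_t)0;  // limbs of p_521
//             d = (unsigned __int128)u[i] - p - b;
//             t[i] = (uint64_t)d; b = (uint64_t)(d >> 64) & 1;
//         }
//         if (!b)                            // no borrow <=> u >= p_521
//             for (int i = 0; i < 9; i++) u[i] = t[i];
//     }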
// Store it back to the final output
movq res, %rdi
movq %r8, (%rdi)
movq %r9, N(%rdi)
movq %r10, 2*N(%rdi)
movq %r11, 3*N(%rdi)
movq %r12, 4*N(%rdi)
movq %r13, 5*N(%rdi)
movq %r14, 6*N(%rdi)
movq %r15, 7*N(%rdi)
movq %rbp, 8*N(%rdi)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_inv_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_fromlebytes_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert little-endian bytes to 9-digit 528-bit bignum
//
// extern void bignum_fromlebytes_p521(uint64_t z[static 9],
// const uint8_t x[static 66]);
//
// The result will be < 2^528 since it is translated from 66 bytes.
// It is mainly intended for inputs x < p_521 < 2^521 < 2^528.
//
// Since x86 is little-endian, this is just copying.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
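// A hedged C rendition of the same conversion (helper name is ours; assumes
// a little-endian host, exactly as the comment above notes):
//
//     #include <stdint.h>
//     #include <string.h>
//     static void fromlebytes_p521_ref(uint64_t z[9], const uint8_t x[66]) {
//         memcpy(z, x, 64);                                 // limbs 0..7 verbatim
//         z[8] = (uint64_t)x[64] | ((uint64_t)x[65] << 8);  // top two bytes, zero-extended
//     }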
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_fromlebytes_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_fromlebytes_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_fromlebytes_p521)
.text
#define z %rdi
#define x %rsi
#define a %rax
S2N_BN_SYMBOL(bignum_fromlebytes_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
movq (x), a
movq a, (z)
movq 8(x), a
movq a, 8(z)
movq 16(x), a
movq a, 16(z)
movq 24(x), a
movq a, 24(z)
movq 32(x), a
movq a, 32(z)
movq 40(x), a
movq a, 40(z)
movq 48(x), a
movq a, 48(z)
movq 56(x), a
movq a, 56(z)
xorl %eax, %eax
movw 64(x), %ax
movq a, 64(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_fromlebytes_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_sqr_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Square modulo p_521, z := (x^2) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_sqr_p521_alt(uint64_t z[static 9],
// const uint64_t x[static 9]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p521_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p521_alt)
.text
// Input arguments
#define z %rdi
#define x %rsi
// Macro for the key "multiply and add to (c,h,l)" step
#define combadd(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// Set up initial window (c,h,l) = numa * numb
#define combaddz(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
xorq c, c ; \
movq %rax, l ; \
movq %rdx, h
// Doubling step (c,h,l) = 2 * (c,hh,ll) + (0,h,l)
#define doubladd(c,h,l,hh,ll) \
addq ll, ll ; \
adcq hh, hh ; \
adcq c, c ; \
addq ll, l ; \
adcq hh, h ; \
adcq $0, c
// Square term incorporation (c,h,l) += numa^2
#define combadd1(c,h,l,numa) \
movq numa, %rax ; \
mulq %rax; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
// A short form where we don't expect a top carry
#define combads(h,l,numa) \
movq numa, %rax ; \
mulq %rax; \
addq %rax, l ; \
adcq %rdx, h
// A version doubling directly before adding, for single non-square terms
#define combadd2(c,h,l,numa,numb) \
movq numa, %rax ; \
mulq numb; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0, c ; \
addq %rax, l ; \
adcq %rdx, h ; \
adcq $0, c
S2N_BN_SYMBOL(bignum_sqr_p521_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Make more registers available and make temporary space on stack
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(72)
// Start doing a conventional columnwise squaring,
// temporarily storing the lower 9 digits on the stack.
// Start with result term 0
movq (x), %rax
mulq %rax
movq %rax, (%rsp)
movq %rdx, %r9
xorq %r10, %r10
// Result term 1
xorq %r11, %r11
combadd2(%r11,%r10,%r9,(x),8(x))
movq %r9, 8(%rsp)
// Result term 2
xorq %r12, %r12
combadd1(%r12,%r11,%r10,8(x))
combadd2(%r12,%r11,%r10,(x),16(x))
movq %r10, 16(%rsp)
// Result term 3
combaddz(%r13,%rcx,%rbx,(x),24(x))
combadd(%r13,%rcx,%rbx,8(x),16(x))
doubladd(%r13,%r12,%r11,%rcx,%rbx)
movq %r11, 24(%rsp)
// Result term 4
combaddz(%r14,%rcx,%rbx,(x),32(x))
combadd(%r14,%rcx,%rbx,8(x),24(x))
doubladd(%r14,%r13,%r12,%rcx,%rbx)
combadd1(%r14,%r13,%r12,16(x))
movq %r12, 32(%rsp)
// Result term 5
combaddz(%r15,%rcx,%rbx,(x),40(x))
combadd(%r15,%rcx,%rbx,8(x),32(x))
combadd(%r15,%rcx,%rbx,16(x),24(x))
doubladd(%r15,%r14,%r13,%rcx,%rbx)
movq %r13, 40(%rsp)
// Result term 6
combaddz(%r8,%rcx,%rbx,(x),48(x))
combadd(%r8,%rcx,%rbx,8(x),40(x))
combadd(%r8,%rcx,%rbx,16(x),32(x))
doubladd(%r8,%r15,%r14,%rcx,%rbx)
combadd1(%r8,%r15,%r14,24(x))
movq %r14, 48(%rsp)
// Result term 7
combaddz(%r9,%rcx,%rbx,(x),56(x))
combadd(%r9,%rcx,%rbx,8(x),48(x))
combadd(%r9,%rcx,%rbx,16(x),40(x))
combadd(%r9,%rcx,%rbx,24(x),32(x))
doubladd(%r9,%r8,%r15,%rcx,%rbx)
movq %r15, 56(%rsp)
// Result term 8
combaddz(%r10,%rcx,%rbx,(x),64(x))
combadd(%r10,%rcx,%rbx,8(x),56(x))
combadd(%r10,%rcx,%rbx,16(x),48(x))
combadd(%r10,%rcx,%rbx,24(x),40(x))
doubladd(%r10,%r9,%r8,%rcx,%rbx)
combadd1(%r10,%r9,%r8,32(x))
movq %r8, 64(%rsp)
// We now stop writing back and keep remaining results in a register window.
// Continue with result term 9
combaddz(%r11,%rcx,%rbx,8(x),64(x))
combadd(%r11,%rcx,%rbx,16(x),56(x))
combadd(%r11,%rcx,%rbx,24(x),48(x))
combadd(%r11,%rcx,%rbx,32(x),40(x))
doubladd(%r11,%r10,%r9,%rcx,%rbx)
// Result term 10
combaddz(%r12,%rcx,%rbx,16(x),64(x))
combadd(%r12,%rcx,%rbx,24(x),56(x))
combadd(%r12,%rcx,%rbx,32(x),48(x))
doubladd(%r12,%r11,%r10,%rcx,%rbx)
combadd1(%r12,%r11,%r10,40(x))
// Result term 11
combaddz(%r13,%rcx,%rbx,24(x),64(x))
combadd(%r13,%rcx,%rbx,32(x),56(x))
combadd(%r13,%rcx,%rbx,40(x),48(x))
doubladd(%r13,%r12,%r11,%rcx,%rbx)
// Result term 12
combaddz(%r14,%rcx,%rbx,32(x),64(x))
combadd(%r14,%rcx,%rbx,40(x),56(x))
doubladd(%r14,%r13,%r12,%rcx,%rbx)
combadd1(%r14,%r13,%r12,48(x))
// Result term 13
combaddz(%r15,%rcx,%rbx,40(x),64(x))
combadd(%r15,%rcx,%rbx,48(x),56(x))
doubladd(%r15,%r14,%r13,%rcx,%rbx);
// Result term 14
xorq %r8, %r8
combadd1(%r8,%r15,%r14,56(x))
combadd2(%r8,%r15,%r14,48(x),64(x))
// Result term 15
movq 56(x), %rax
mulq 64(x)
addq %rax, %rax
adcq %rdx, %rdx
addq %rax, %r15
adcq %rdx, %r8
// Result term 16
movq 64(x), %rax
imulq %rax, %rax
addq %r8, %rax
// Now the upper portion is [%rax;%r15;%r14;%r13;%r12;%r11;%r10;%r9;64(%rsp)].
// Rotate the upper portion right 9 bits since 2^512 == 2^-9 (mod p_521)
// Let rotated result %rdx,%r15,%r14,...,%r8 be h (high) and %rsp[0..7] be l (low)
movq 64(%rsp), %r8
movq %r8, %rdx
andq $0x1FF, %rdx
shrdq $9, %r9, %r8
shrdq $9, %r10, %r9
shrdq $9, %r11, %r10
shrdq $9, %r12, %r11
shrdq $9, %r13, %r12
shrdq $9, %r14, %r13
shrdq $9, %r15, %r14
shrdq $9, %rax, %r15
shrq $9, %rax
addq %rax, %rdx
// Force carry-in then add to get s = h + l + 1
// but actually add all 1s in the top 53 bits to get simple carry out
stc
adcq (%rsp), %r8
adcq 8(%rsp), %r9
adcq 16(%rsp), %r10
adcq 24(%rsp), %r11
adcq 32(%rsp), %r12
adcq 40(%rsp), %r13
adcq 48(%rsp), %r14
adcq 56(%rsp), %r15
adcq $~0x1FF, %rdx
// Now CF is set <=> h + l + 1 >= 2^521 <=> h + l >= p_521,
// in which case the lower 521 bits are already right. Otherwise if
// CF is clear, we want to subtract 1. Hence subtract the complement
// of the carry flag then mask the top word, which scrubs the
// padding in either case. Write digits back as they are created.
cmc
sbbq $0, %r8
movq %r8, (z)
sbbq $0, %r9
movq %r9, 8(z)
sbbq $0, %r10
movq %r10, 16(z)
sbbq $0, %r11
movq %r11, 24(z)
sbbq $0, %r12
movq %r12, 32(z)
sbbq $0, %r13
movq %r13, 40(z)
sbbq $0, %r14
movq %r14, 48(z)
sbbq $0, %r15
movq %r15, 56(z)
sbbq $0, %rdx
andq $0x1FF, %rdx
movq %rdx, 64(z)
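// The rotate-and-add step just completed computes (h + l) mod p_521 via the
// s + 1 carry trick. A hedged C rendition (assumes h + l < 2*p_521, and uses
// plain branches where the assembly is branch-free):
//
//     #include <stdint.h>
//     static void addmod_p521(uint64_t z[9], const uint64_t h[9], const uint64_t l[9]) {
//         unsigned __int128 c = 1;                  // forced carry-in forms s + 1
//         for (int i = 0; i < 9; i++) {
//             c += (unsigned __int128)h[i] + l[i];
//             z[i] = (uint64_t)c; c >>= 64;
//         }
//         if (z[8] >> 9) z[8] &= 0x1FF;             // s >= p_521: drop the 2^521
//         else for (int i = 0; i < 9; i++) if (z[i]--) break;  // else undo the +1
//     }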
// Restore registers and return
CFI_INC_RSP(72)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_sqr_p521_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_deamont_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^576) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_deamont_p521(uint64_t z[static 9],
// const uint64_t x[static 9]);
//
// Convert a 9-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 9-digit input will work, with no range restriction.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p521)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p521)
.text
#define z %rdi
#define x %rsi
#define c %rax
#define h %rax
#define l %rbx
#define d0 %rdx
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11
#define d6 %r12
#define d7 %r13
#define d8 %rbp
S2N_BN_SYMBOL(bignum_deamont_p521):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Save more registers to play with
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%rbp)
// Stash the lowest 55 bits at the top of c, then shift the whole 576-bit
// input right by 9*64 - 521 = 576 - 521 = 55 bits.
movq (x), d0
movq d0, c
shlq $9, c
movq 8(x), d1
shrdq $55, d1, d0
movq 16(x), d2
shrdq $55, d2, d1
movq 24(x), d3
shrdq $55, d3, d2
movq 32(x), d4
shrdq $55, d4, d3
movq 40(x), d5
shrdq $55, d5, d4
movq 48(x), d6
shrdq $55, d6, d5
movq 56(x), d7
shrdq $55, d7, d6
movq 64(x), d8
shrdq $55, d8, d7
shrq $55, d8
// Now writing x = 2^55 * h + l (so here [d8;..d0] = h and c = 2^9 * l)
// we want (h + 2^{521-55} * l) mod p_521 = s mod p_521. Since s < 2 * p_521
// this is just "if s >= p_521 then s - p_521 else s". First get
// s + 1, but pad up the top to get a top-bit carry-out from it, so now
// CF <=> s + 1 >= 2^521 <=> s >= p_521, while the digits [d8;...d0] are
// now s + 1 except for bits above 521.
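// The exponent bookkeeping above is easy to confirm (hedged, illustrative):
// since 2^521 == 1 (mod p_521), exponents reduce mod 521.
//
//     #include <assert.h>
//     int main(void) {
//         assert(((-576 % 521) + 521) % 521 == 466);  // 2^-576 == 2^466 = 2^(521-55)
//         return 0;
//     }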
movq c, l
shrq $55, h
shlq $9, l
orq $~0x1FF, d8
addq $1, d0
adcq $0, d1
adcq $0, d2
adcq $0, d3
adcq $0, d4
adcq $0, d5
adcq $0, d6
adcq l, d7
adcq h, d8
// We want "if CF then (s + 1) - 2^521 else (s + 1) - 1" so subtract ~CF
// and mask to 521 bits, writing digits back as they are created.
cmc
sbbq $0, d0
movq d0, (z)
sbbq $0, d1
movq d1, 8(z)
sbbq $0, d2
movq d2, 16(z)
sbbq $0, d3
movq d3, 24(z)
sbbq $0, d4
movq d4, 32(z)
sbbq $0, d5
movq d5, 40(z)
sbbq $0, d6
movq d6, 48(z)
sbbq $0, d7
movq d7, 56(z)
sbbq $0, d8
andq $0x1FF, d8
movq d8, 64(z)
// Restore registers and return
CFI_POP(%rbp)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_deamont_p521)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/bignum_mod_n521_9_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_521
// Input x[9]; output z[9]
//
// extern void bignum_mod_n521_9_alt(uint64_t z[static 9],
// const uint64_t x[static 9]);
//
// Reduction is modulo the group order of the NIST curve P-521.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI: RCX = z, RDX = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n521_9_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n521_9_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n521_9_alt)
.text
#define z %rdi
#define x %rsi
#define q %rcx
#define a %rax
#define d %rdx
#define c %rcx
#define n0 %r8
#define n1 %r9
#define n2 %r10
#define n3 %r11
#define ashort %eax
#define cshort %ecx
#define qshort %edx
S2N_BN_SYMBOL(bignum_mod_n521_9_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif
// Load the top digit, putting a bit-stuffed version in output buffer.
// The initial quotient estimate is q = h + 1 where x = 2^521 * h + t
// The last add also clears the CF and OF flags ready for the carry chain.
movq 64(x), q
movq $~0x1FF, a
orq q, a
movq a, 64(z)
shrq $9, q
addq $1, q
// Now load other digits and form r = x - q * n_521 = (q * r_521 + t) - 2^521,
// which is stored in the output buffer. Thanks to the bit-stuffing at the
// start, we get r' = (q * r_521 + t) + (2^576 - 2^521) = r + 2^576 as the
// computed result including the top carry. Hence CF <=> r >= 0, while
// r' == r (mod 2^521) because things below bit 521 are uncorrupted. We
// keep the top word in the register c since we at least have that one free.
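// A hedged word-sized model of this quotient-estimate reduction (same shape,
// not the 521-bit widths; assumes k and r small enough that r*(h+2) < 2^k,
// so the estimate q = h + 1 is off by at most one):
//
//     #include <stdint.h>
//     static uint64_t mod_pseudomersenne(uint64_t x, unsigned k, uint64_t r) {
//         uint64_t n = ((uint64_t)1 << k) - r;        // modulus n = 2^k - r
//         uint64_t q = (x >> k) + 1;                  // quotient estimate q = h + 1
//         int64_t rem = (int64_t)(q * r + (x & (((uint64_t)1 << k) - 1))
//                                 - ((uint64_t)1 << k));  // x - q*n = q*r + t - 2^k
//         if (rem < 0) rem += (int64_t)n;             // went negative: add n back
//         return (uint64_t)rem;
//     }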
movq $0x449048e16ec79bf7, %rax
mulq q
movq %rax, n0
movq %rdx, n1
movq $0xc44a36477663b851, %rax
mulq q
xorq n2, n2
addq %rax, n1
adcq %rdx, n2
movq $0x8033feb708f65a2f, %rax
mulq q
xorq n3, n3
addq %rax, n2
adcq %rdx, n3
movq $0xae79787c40d06994, %rax
mulq q
imulq $5, q
addq %rax, n3
adcq %rdx, q
sbbq %rdx, %rdx
negq %rdx
// [%rdx;q;n3;n2;n1;n0] = q * r_521
xorl %eax, %eax // %rax is used as a zero hereafter
addq (x), n0
movq n0, (z)
adcq 8(x), n1
movq n1, 8(z)
adcq 16(x), n2
movq n2, 16(z)
adcq 24(x), n3
movq n3, 24(z)
adcq 32(x), q
movq q, 32(z)
adcq 40(x), %rdx
movq %rdx, 40(z)
movq 48(x), d
adcq %rax, d
movq d, 48(z)
movq 56(x), d
adcq %rax, d
movq d, 56(z)
movq 64(z), c
adcq %rax, c
// We already know r < n_521, but if it actually went negative then
// we need to add back n_521 again. Use d as a bitmask for r < 0,
// and just subtract r_521 and mask rather than literally adding 2^521.
// This also gets rid of the bit-stuffing above.
cmc
sbbq d, d
movq $0x449048e16ec79bf7, n0
andq d, n0
movq $0xc44a36477663b851, n1
andq d, n1
movq $0x8033feb708f65a2f, n2
andq d, n2
movq $0xae79787c40d06994, n3
andq d, n3
andq $5, d
subq n0, (z)
sbbq n1, 8(z)
sbbq n2, 16(z)
sbbq n3, 24(z)
sbbq d, 32(z)
sbbq %rax, 40(z)
sbbq %rax, 48(z)
sbbq %rax, 56(z)
sbbl ashort, cshort
andl $0x1FF, cshort
movq c, 64(z)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(bignum_mod_n521_9_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p521/p521_jadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jadd_alt(uint64_t p3[static 27], const uint64_t p1[static 27],
// const uint64_t p2[static 27]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input points p1 and p2 are
// fully reduced mod p_521, that both z coordinates are nonzero and
// that neither p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents
// the same affine point as".
//
// Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2
// Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum_x86_att.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jadd_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jadd_alt)
.text
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
// These are where they arrive except for input_y, initially in %rdx
#define input_z %rdi
#define input_x %rsi
#define input_y %rcx
// Pointer-offset pairs for inputs and outputs
#define x_1 0(input_x)
#define y_1 NUMSIZE(input_x)
#define z_1 (2*NUMSIZE)(input_x)
#define x_2 0(input_y)
#define y_2 NUMSIZE(input_y)
#define z_2 (2*NUMSIZE)(input_y)
#define x_3 0(input_z)
#define y_3 NUMSIZE(input_z)
#define z_3 (2*NUMSIZE)(input_z)
// Pointer-offset pairs for temporaries, with some aliasing
// The tmp field is internal storage for field mul and sqr.
// NSPACE is the total stack needed for these temporaries
#define z1sq (NUMSIZE*0)(%rsp)
#define ww (NUMSIZE*0)(%rsp)
#define resx (NUMSIZE*0)(%rsp)
#define yd (NUMSIZE*1)(%rsp)
#define y2a (NUMSIZE*1)(%rsp)
#define x2a (NUMSIZE*2)(%rsp)
#define zzx2 (NUMSIZE*2)(%rsp)
#define zz (NUMSIZE*3)(%rsp)
#define t1 (NUMSIZE*3)(%rsp)
#define t2 (NUMSIZE*4)(%rsp)
#define x1a (NUMSIZE*4)(%rsp)
#define zzx1 (NUMSIZE*4)(%rsp)
#define resy (NUMSIZE*4)(%rsp)
#define xd (NUMSIZE*5)(%rsp)
#define z2sq (NUMSIZE*5)(%rsp)
#define resz (NUMSIZE*5)(%rsp)
#define y1a (NUMSIZE*6)(%rsp)
#define tmp (NUMSIZE*7)(%rsp)
#define NSPACE NUMSIZE*8
// Corresponds exactly to bignum_mul_p521_alt except temp storage
#define mul_p521(P0,P1,P2) \
movq P1, %rax ; \
mulq P2; \
movq %rax, 504(%rsp) ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
movq 0x8+P1, %rax ; \
mulq P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq %r9, 512(%rsp) ; \
xorq %r12, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x10+P1, %rax ; \
mulq P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq %r10, 520(%rsp) ; \
xorq %r13, %r13 ; \
movq P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x10+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x18+P1, %rax ; \
mulq P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq %r11, 528(%rsp) ; \
xorq %r14, %r14 ; \
movq P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x18+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x20+P1, %rax ; \
mulq P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq %r12, 536(%rsp) ; \
xorq %r15, %r15 ; \
movq P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x8+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x18+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x20+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x28+P1, %rax ; \
mulq P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq %r13, 544(%rsp) ; \
xorq %r8, %r8 ; \
movq P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x8+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x10+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x18+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x20+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x28+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x30+P1, %rax ; \
mulq P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq %r14, 552(%rsp) ; \
xorq %r9, %r9 ; \
movq P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq %r9, %r9 ; \
movq 0x8+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x10+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x20+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x28+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x30+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq 0x38+P1, %rax ; \
mulq P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
adcq $0x0, %r9 ; \
movq %r15, 560(%rsp) ; \
xorq %r10, %r10 ; \
movq P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq %r10, %r10 ; \
movq 0x8+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x10+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x28+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x30+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x38+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x40+P1, %rax ; \
mulq P2; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq %r8, 568(%rsp) ; \
xorq %r11, %r11 ; \
movq 0x8+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq %r11, %r11 ; \
movq 0x10+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x18+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x28+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x30+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x38+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x40+P1, %rax ; \
mulq 0x8+P2; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
xorq %r12, %r12 ; \
movq 0x10+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq %r12, %r12 ; \
movq 0x18+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x20+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x28+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x30+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x38+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x40+P1, %rax ; \
mulq 0x10+P2; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
xorq %r13, %r13 ; \
movq 0x18+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq %r13, %r13 ; \
movq 0x20+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x28+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x30+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x38+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x40+P1, %rax ; \
mulq 0x18+P2; \
addq %rax, %r11 ; \
adcq %rdx, %r12 ; \
adcq $0x0, %r13 ; \
xorq %r14, %r14 ; \
movq 0x20+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq %r14, %r14 ; \
movq 0x28+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x30+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x38+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x40+P1, %rax ; \
mulq 0x20+P2; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
xorq %r15, %r15 ; \
movq 0x28+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq %r15, %r15 ; \
movq 0x30+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x38+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
movq 0x40+P1, %rax ; \
mulq 0x28+P2; \
addq %rax, %r13 ; \
adcq %rdx, %r14 ; \
adcq $0x0, %r15 ; \
xorq %r8, %r8 ; \
movq 0x30+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq %r8, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x30+P2; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x40+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
mulq 0x38+P2; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
imulq 0x40+P2, %rax ; \
addq %r8, %rax ; \
movq 568(%rsp), %r8 ; \
movq %r8, %rdx ; \
andq $0x1ff, %rdx ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rax, %r15 ; \
shrq $0x9, %rax ; \
addq %rax, %rdx ; \
stc; \
adcq 504(%rsp), %r8 ; \
adcq 512(%rsp), %r9 ; \
adcq 520(%rsp), %r10 ; \
adcq 528(%rsp), %r11 ; \
adcq 536(%rsp), %r12 ; \
adcq 544(%rsp), %r13 ; \
adcq 552(%rsp), %r14 ; \
adcq 560(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rdx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rdx ; \
andq $0x1ff, %rdx ; \
movq %rdx, 0x40+P0
// Corresponds to bignum_sqr_p521_alt except %rbp is used
// in place of %rcx and tmp is the temp storage location
#define sqr_p521(P0,P1) \
movq P1, %rax ; \
mulq %rax; \
movq %rax, 504(%rsp) ; \
movq %rdx, %r9 ; \
xorq %r10, %r10 ; \
xorq %r11, %r11 ; \
movq P1, %rax ; \
mulq 0x8+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r11 ; \
addq %rax, %r9 ; \
adcq %rdx, %r10 ; \
adcq $0x0, %r11 ; \
movq %r9, 512(%rsp) ; \
xorq %r12, %r12 ; \
movq 0x8+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r12 ; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq %r10, 520(%rsp) ; \
movq P1, %rax ; \
mulq 0x18+P1; \
xorq %r13, %r13 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x10+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r13 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r13, %r13 ; \
addq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq $0x0, %r13 ; \
movq %r11, 528(%rsp) ; \
movq P1, %rax ; \
mulq 0x20+P1; \
xorq %r14, %r14 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r14 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r14, %r14 ; \
addq %rbx, %r12 ; \
adcq %rbp, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x10+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq %r12, 536(%rsp) ; \
movq P1, %rax ; \
mulq 0x28+P1; \
xorq %r15, %r15 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r15 ; \
movq 0x10+P1, %rax ; \
mulq 0x18+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r15 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r15, %r15 ; \
addq %rbx, %r13 ; \
adcq %rbp, %r14 ; \
adcq $0x0, %r15 ; \
movq %r13, 544(%rsp) ; \
movq P1, %rax ; \
mulq 0x30+P1; \
xorq %r8, %r8 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r8 ; \
movq 0x10+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r8 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r8, %r8 ; \
addq %rbx, %r14 ; \
adcq %rbp, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x18+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq %r14, 552(%rsp) ; \
movq P1, %rax ; \
mulq 0x38+P1; \
xorq %r9, %r9 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r9 ; \
movq 0x10+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r9 ; \
movq 0x18+P1, %rax ; \
mulq 0x20+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r9 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r9, %r9 ; \
addq %rbx, %r15 ; \
adcq %rbp, %r8 ; \
adcq $0x0, %r9 ; \
movq %r15, 560(%rsp) ; \
movq P1, %rax ; \
mulq 0x40+P1; \
xorq %r10, %r10 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x8+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r10 ; \
movq 0x10+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r10 ; \
movq 0x18+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r10 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r10, %r10 ; \
addq %rbx, %r8 ; \
adcq %rbp, %r9 ; \
adcq $0x0, %r10 ; \
movq 0x20+P1, %rax ; \
mulq %rax; \
addq %rax, %r8 ; \
adcq %rdx, %r9 ; \
adcq $0x0, %r10 ; \
movq %r8, 568(%rsp) ; \
movq 0x8+P1, %rax ; \
mulq 0x40+P1; \
xorq %r11, %r11 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x10+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r11 ; \
movq 0x18+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r11 ; \
movq 0x20+P1, %rax ; \
mulq 0x28+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r11 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r11, %r11 ; \
addq %rbx, %r9 ; \
adcq %rbp, %r10 ; \
adcq $0x0, %r11 ; \
movq 0x10+P1, %rax ; \
mulq 0x40+P1; \
xorq %r12, %r12 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x18+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r12 ; \
movq 0x20+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r12 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r12, %r12 ; \
addq %rbx, %r10 ; \
adcq %rbp, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x28+P1, %rax ; \
mulq %rax; \
addq %rax, %r10 ; \
adcq %rdx, %r11 ; \
adcq $0x0, %r12 ; \
movq 0x18+P1, %rax ; \
mulq 0x40+P1; \
xorq %r13, %r13 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x20+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r13 ; \
movq 0x28+P1, %rax ; \
mulq 0x30+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r13 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r13, %r13 ; \
addq %rbx, %r11 ; \
adcq %rbp, %r12 ; \
adcq $0x0, %r13 ; \
movq 0x20+P1, %rax ; \
mulq 0x40+P1; \
xorq %r14, %r14 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x28+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r14 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r14, %r14 ; \
addq %rbx, %r12 ; \
adcq %rbp, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x30+P1, %rax ; \
mulq %rax; \
addq %rax, %r12 ; \
adcq %rdx, %r13 ; \
adcq $0x0, %r14 ; \
movq 0x28+P1, %rax ; \
mulq 0x40+P1; \
xorq %r15, %r15 ; \
movq %rax, %rbx ; \
movq %rdx, %rbp ; \
movq 0x30+P1, %rax ; \
mulq 0x38+P1; \
addq %rax, %rbx ; \
adcq %rdx, %rbp ; \
adcq $0x0, %r15 ; \
addq %rbx, %rbx ; \
adcq %rbp, %rbp ; \
adcq %r15, %r15 ; \
addq %rbx, %r13 ; \
adcq %rbp, %r14 ; \
adcq $0x0, %r15 ; \
xorq %r8, %r8 ; \
movq 0x38+P1, %rax ; \
mulq %rax; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x30+P1, %rax ; \
mulq 0x40+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
adcq $0x0, %r8 ; \
addq %rax, %r14 ; \
adcq %rdx, %r15 ; \
adcq $0x0, %r8 ; \
movq 0x38+P1, %rax ; \
mulq 0x40+P1; \
addq %rax, %rax ; \
adcq %rdx, %rdx ; \
addq %rax, %r15 ; \
adcq %rdx, %r8 ; \
movq 0x40+P1, %rax ; \
imulq %rax, %rax ; \
addq %r8, %rax ; \
movq 568(%rsp), %r8 ; \
movq %r8, %rdx ; \
andq $0x1ff, %rdx ; \
shrdq $0x9, %r9, %r8 ; \
shrdq $0x9, %r10, %r9 ; \
shrdq $0x9, %r11, %r10 ; \
shrdq $0x9, %r12, %r11 ; \
shrdq $0x9, %r13, %r12 ; \
shrdq $0x9, %r14, %r13 ; \
shrdq $0x9, %r15, %r14 ; \
shrdq $0x9, %rax, %r15 ; \
shrq $0x9, %rax ; \
addq %rax, %rdx ; \
stc; \
adcq 504(%rsp), %r8 ; \
adcq 512(%rsp), %r9 ; \
adcq 520(%rsp), %r10 ; \
adcq 528(%rsp), %r11 ; \
adcq 536(%rsp), %r12 ; \
adcq 544(%rsp), %r13 ; \
adcq 552(%rsp), %r14 ; \
adcq 560(%rsp), %r15 ; \
adcq $0xfffffffffffffe00, %rdx ; \
cmc; \
sbbq $0x0, %r8 ; \
movq %r8, P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x8+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x10+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x18+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x20+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x28+P0 ; \
sbbq $0x0, %r14 ; \
movq %r14, 0x30+P0 ; \
sbbq $0x0, %r15 ; \
movq %r15, 0x38+P0 ; \
sbbq $0x0, %rdx ; \
andq $0x1ff, %rdx ; \
movq %rdx, 0x40+P0
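// The reduction at the end of sqr_p521 above exploits p_521 = 2^521 - 1,
// so 2^521 == 1 (mod p_521). The chain of 9-bit shrdq shifts (521 =
// 8 * 64 + 9) splits the 1042-bit square as h * 2^521 + l, and h is then
// folded back into l. The stc/adc/cmc/sbb sequence in effect computes
// h + l + 1: a carry out of bit 521 means the sum is >= 2^521, in which
// case the extra 1 together with the final 9-bit mask (dropping bit 521)
// yields exactly h + l - p_521; otherwise the 1 is borrowed back out.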
// Corresponds exactly to bignum_sub_p521
#define sub_p521(P0,P1,P2) \
movq P1, %rax ; \
subq P2, %rax ; \
movq 0x8+P1, %rdx ; \
sbbq 0x8+P2, %rdx ; \
movq 0x10+P1, %r8 ; \
sbbq 0x10+P2, %r8 ; \
movq 0x18+P1, %r9 ; \
sbbq 0x18+P2, %r9 ; \
movq 0x20+P1, %r10 ; \
sbbq 0x20+P2, %r10 ; \
movq 0x28+P1, %r11 ; \
sbbq 0x28+P2, %r11 ; \
movq 0x30+P1, %r12 ; \
sbbq 0x30+P2, %r12 ; \
movq 0x38+P1, %r13 ; \
sbbq 0x38+P2, %r13 ; \
movq 0x40+P1, %r14 ; \
sbbq 0x40+P2, %r14 ; \
sbbq $0x0, %rax ; \
movq %rax, P0 ; \
sbbq $0x0, %rdx ; \
movq %rdx, 0x8+P0 ; \
sbbq $0x0, %r8 ; \
movq %r8, 0x10+P0 ; \
sbbq $0x0, %r9 ; \
movq %r9, 0x18+P0 ; \
sbbq $0x0, %r10 ; \
movq %r10, 0x20+P0 ; \
sbbq $0x0, %r11 ; \
movq %r11, 0x28+P0 ; \
sbbq $0x0, %r12 ; \
movq %r12, 0x30+P0 ; \
sbbq $0x0, %r13 ; \
movq %r13, 0x38+P0 ; \
sbbq $0x0, %r14 ; \
andq $0x1ff, %r14 ; \
movq %r14, 0x40+P0
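// Since p_521 = 2^521 - 1, the correction on a borrow out of the raw
// 521-bit subtraction above amounts to adding p_521 back: the sbbq $0x0
// chain subtracts 1 in total, and the final 9-bit mask on the top word
// discards bit 521, which together is the same as adding 2^521 - 1
// modulo 2^521.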
// Additional macros to help with final multiplexing
#define load9(r0,r1,r2,r3,r4,r5,r6,r7,ra,P) \
movq P, r0 ; \
movq 8+P, r1 ; \
movq 16+P, r2 ; \
movq 24+P, r3 ; \
movq 32+P, r4 ; \
movq 40+P, r5 ; \
movq 48+P, r6 ; \
movq 56+P, r7 ; \
movq 64+P, ra
#define store9(P,r0,r1,r2,r3,r4,r5,r6,r7,ra) \
movq r0, P ; \
movq r1, 8+P ; \
movq r2, 16+P ; \
movq r3, 24+P ; \
movq r4, 32+P ; \
movq r5, 40+P ; \
movq r6, 48+P ; \
movq r7, 56+P ; \
movq ra, 64+P
#define muxload9(r0,r1,r2,r3,r4,r5,r6,r7,ra,P0,P1,P2) \
movq P0, r0 ; \
cmovbq P1, r0 ; \
cmovnbe P2, r0 ; \
movq 8+P0, r1 ; \
cmovbq 8+P1, r1 ; \
cmovnbe 8+P2, r1 ; \
movq 16+P0, r2 ; \
cmovbq 16+P1, r2 ; \
cmovnbe 16+P2, r2 ; \
movq 24+P0, r3 ; \
cmovbq 24+P1, r3 ; \
cmovnbe 24+P2, r3 ; \
movq 32+P0, r4 ; \
cmovbq 32+P1, r4 ; \
cmovnbe 32+P2, r4 ; \
movq 40+P0, r5 ; \
cmovbq 40+P1, r5 ; \
cmovnbe 40+P2, r5 ; \
movq 48+P0, r6 ; \
cmovbq 48+P1, r6 ; \
cmovnbe 48+P2, r6 ; \
movq 56+P0, r7 ; \
cmovbq 56+P1, r7 ; \
cmovnbe 56+P2, r7 ; \
movq 64+P0, ra ; \
cmovbq 64+P1, ra ; \
cmovnbe 64+P2, ra
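// muxload9 loads a 9-word default from P0, overriding it with P1 on
// condition B or with P2 on condition NBE; the flags must already have
// been set by a comparison before the macro is used, as in the
// degenerate-case handling at the end of the function below.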
#define copy9(P0,P1) \
movq P1, %rax ; \
movq %rax, P0 ; \
movq 8+P1, %rax ; \
movq %rax, 8+P0 ; \
movq 16+P1, %rax ; \
movq %rax, 16+P0 ; \
movq 24+P1, %rax ; \
movq %rax, 24+P0 ; \
movq 32+P1, %rax ; \
movq %rax, 32+P0 ; \
movq 40+P1, %rax ; \
movq %rax, 40+P0 ; \
movq 48+P1, %rax ; \
movq %rax, 48+P0 ; \
movq 56+P1, %rax ; \
movq %rax, 56+P0 ; \
movq 64+P1, %rax ; \
movq %rax, 64+P0
S2N_BN_SYMBOL(p521_jadd_alt):
CFI_START
_CET_ENDBR
#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif
// Save registers and make room on stack for temporary variables
CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(NSPACE)
// Move the input arguments to stable places (two are already there)
movq %rdx, input_y
// Main code, just a sequence of basic field operations
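// In the usual Jacobian addition terms this computes u1 = x1 * z2^2,
// u2 = x2 * z1^2, s1 = y1 * z2^3 and s2 = y2 * z1^3 (held in x1a, x2a,
// y1a and y2a), then h = u2 - u1 and r = s2 - s1 (xd and yd), giving
// x3 = r^2 - h^3 - 2 * u1 * h^2, y3 = r * (u1 * h^2 - x3) - s1 * h^3
// and z3 = h * z1 * z2, all modulo p_521.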
sqr_p521(z1sq,z_1)
sqr_p521(z2sq,z_2)
mul_p521(y1a,z_2,y_1)
mul_p521(y2a,z_1,y_2)
mul_p521(x2a,z1sq,x_2)
mul_p521(x1a,z2sq,x_1)
mul_p521(y2a,z1sq,y2a)
mul_p521(y1a,z2sq,y1a)
sub_p521(xd,x2a,x1a)
sub_p521(yd,y2a,y1a)
sqr_p521(zz,xd)
sqr_p521(ww,yd)
mul_p521(zzx1,zz,x1a)
mul_p521(zzx2,zz,x2a)
sub_p521(resx,ww,zzx1)
sub_p521(t1,zzx2,zzx1)
mul_p521(xd,xd,z_1)
sub_p521(resx,resx,zzx2)
sub_p521(t2,zzx1,resx)
mul_p521(t1,t1,y1a)
mul_p521(resz,xd,z_2)
mul_p521(t2,yd,t2)
sub_p521(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0)
// and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0
// and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0)
load9(%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp,z_1)
orq %r9, %r8
orq %r11, %r10
orq %r13, %r12
orq %r15, %r14
orq %r10, %r8
orq %r14, %r12
orq %rbp, %r8
orq %r12, %r8
negq %r8
sbbq %rax, %rax
load9(%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp,z_2)
orq %r9, %r8
orq %r11, %r10
orq %r13, %r12
orq %r15, %r14
orq %r10, %r8
orq %r14, %r12
orq %rbp, %r8
orq %r12, %r8
negq %r8
sbbq %rdx, %rdx
cmpq %rax, %rdx
// Multiplex the outputs accordingly. Keep the y and z results in the
// stack temporaries resy and resz until there are no more loads from the
// inputs, so there are no assumptions on input-output aliasing
muxload9(%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp,resy,y_1,y_2)
store9(resy,%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp)
muxload9(%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp,resz,z_1,z_2)
store9(resz,%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp)
muxload9(%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp,resx,x_1,x_2)
store9(x_3,%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%rbp)
copy9(y_3,resy)
copy9(z_3,resz)
// Restore stack and registers
CFI_INC_RSP(NSPACE)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET
S2N_BN_SIZE_DIRECTIVE(p521_jadd_alt)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif