Columns: repo_id string (length 5 to 115) | size int64 (590 to 5.01M) | file_path string (length 4 to 212) | content string (length 590 to 5.01M)
repo: wlsfx/bnbb | size: 2,428 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_optadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Optionally add, z := x + y (if p nonzero) or z := x (if p zero)
// Inputs x[k], p, y[k]; outputs function return (carry-out) and z[k]
//
//    extern uint64_t bignum_optadd(uint64_t k, uint64_t *z, const uint64_t *x,
//                                  uint64_t p, const uint64_t *y);
//
// It is assumed that all numbers x, y and z have the same size k digits.
// Returns carry-out as per usual addition, always 0 if p was zero.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = p, R8 = y, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = x, R9 = p, [RSP+40] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optadd)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optadd)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optadd)
        .text

#define k %rdi
#define z %rsi
#define x %rdx
#define p %rcx
#define y %r8

#define c %rax
#define i %r9
#define b %r10
#define a %r11

S2N_BN_SYMBOL(bignum_optadd):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

// Initialize top carry to zero in all cases (also return value)

        xorq    c, c

// If k = 0 do nothing

        testq   k, k
        jz      Lbignum_optadd_end

// Convert the nonzero/zero status of p into an all-1s or all-0s mask

        negq    p
        sbbq    p, p

// Now go round the loop for i=0...k-1, saving the carry in c each iteration

        xorq    i, i
Lbignum_optadd_loop:
        movq    (x,i,8), a
        movq    (y,i,8), b
        andq    p, b
        negq    c
        adcq    b, a
        sbbq    c, c
        movq    a, (z,i,8)
        incq    i
        cmpq    k, i
        jc      Lbignum_optadd_loop

// Return top carry

        negq    %rax

Lbignum_optadd_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_optadd)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
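For readers cross-checking the header's contract, here is a minimal C model of bignum_optadd's documented behavior (a sketch only; ref_optadd is a hypothetical name, and unlike the assembly the test of p is not constant-time):

#include <stdint.h>

/* Reference model: z := x + (p ? y : 0), returning the carry-out. */
uint64_t ref_optadd(uint64_t k, uint64_t *z, const uint64_t *x,
                    uint64_t p, const uint64_t *y) {
    uint64_t mask = p ? ~(uint64_t)0 : 0;   /* same all-1s/all-0s mask idea */
    uint64_t c = 0;                         /* running carry */
    for (uint64_t i = 0; i < k; i++) {
        uint64_t s = x[i] + c;
        uint64_t c1 = s < c;                /* carry out of x[i] + c */
        uint64_t yi = y[i] & mask;
        z[i] = s + yi;
        c = c1 | (z[i] < yi);               /* carry out of s + yi */
    }
    return c;
}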
repo: wlsfx/bnbb | size: 4,281 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_madd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply-add, z := z + x * y
// Inputs x[m], y[n]; outputs function return (carry-out) and z[k]
//
//    extern uint64_t bignum_madd(uint64_t k, uint64_t *z, uint64_t m,
//                                const uint64_t *x, uint64_t n, const uint64_t *y);
//
// Does the "z := x * y + z" operation, while also returning a "next" or
// "carry" word. In the case where m + n <= p (i.e. the pure product would
// fit in the destination) this is the remainder for the exact result.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = m, RCX = x, R8 = n, R9 = y, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = m, R9 = x, [RSP+40] = n, [RSP+48] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_madd)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_madd)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_madd)
        .text

// These are actually right

#define p %rdi
#define z %rsi
#define n %r8

// These are not

#define c %r15
#define h %r14
#define l %r13
#define x %r12
#define y %r11
#define i %rbx
#define k %r10
#define m %rbp

// These are always local scratch since multiplier result is in these

#define a %rax
#define d %rdx

S2N_BN_SYMBOL(bignum_madd):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
        movq    64(%rsp), %r9
#endif

// We use too many registers, and also we need %rax:%rdx for multiplications

        CFI_PUSH(%rbx)
        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)
        CFI_PUSH(%r15)
        movq    %rdx, m

// If the result size is zero, just return %rax = 0
// We could also do this if either input is size 0.

        xorq    %rax, %rax
        testq   p, p
        jz      Lbignum_madd_end

// Set initial 2-part sum to zero (we zero c inside the body)

        xorq    h, h
        xorq    l, l

// Otherwise do outer loop k = 0 ... k = p - 1

        xorq    k, k

Lbignum_madd_outerloop:

// Zero our carry term first; we eventually want it and a zero is useful now
// Set a = max 0 (k + 1 - n), i = min (k + 1) m
// This defines the range a <= j < i for the inner summation
// Note that since k < p < 2^64 we can assume k + 1 doesn't overflow
// And since we want to increment it anyway, we might as well do it now

        xorq    c, c            // c = 0
        incq    k               // k = k + 1
        movq    k, a            // a = k + 1
        subq    n, a            // a = k + 1 - n
        cmovcq  c, a            // a = max 0 (k + 1 - n)
        movq    m, i            // i = m
        cmpq    m, k            // CF <=> k + 1 < m
        cmovcq  k, i            // i = min (k + 1) m

// Turn i into a loop count, and skip things if it's <= 0
// Otherwise set up initial pointers x -> x0[a] and y -> y0[k - a]
// and then launch into the main inner loop, postdecrementing i

        movq    k, d
        subq    i, d
        subq    a, i
        jbe     Lbignum_madd_innerend
        leaq    (%rcx,a,8), x
        leaq    -8(%r9,d,8), y

Lbignum_madd_innerloop:
        movq    (y,i,8), %rax
        mulq    (x)
        addq    $8, x
        addq    %rax, l
        adcq    %rdx, h
        adcq    $0, c
        decq    i
        jnz     Lbignum_madd_innerloop

Lbignum_madd_innerend:
        addq    l, (z)
        adcq    $0, h
        adcq    $0, c
        movq    h, l
        movq    c, h
        addq    $8, z
        cmpq    p, k
        jc      Lbignum_madd_outerloop

// Move the carry term into the return value

        movq    l, %rax

Lbignum_madd_end:
        CFI_POP(%r15)
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbp)
        CFI_POP(%rbx)
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_madd)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
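The outer loop computes one "column" of the product per iteration, windowed by a <= j < i exactly as the comments describe. A compact C model of the documented contract (a sketch with the hypothetical name ref_madd, assuming a compiler with unsigned __int128; it has none of the assembly's register-level carry tricks):

#include <stdint.h>

/* Reference model: z := z + x * y truncated to k words; returns carry word. */
uint64_t ref_madd(uint64_t k, uint64_t *z, uint64_t m, const uint64_t *x,
                  uint64_t n, const uint64_t *y) {
    unsigned __int128 acc = 0;   /* low two words of the running column sum */
    uint64_t topc = 0;           /* third accumulator word ("c" above)      */
    for (uint64_t i = 0; i < k; i++) {
        uint64_t j0 = (i + 1 > n) ? i + 1 - n : 0;   /* max(0, i+1-n) */
        uint64_t j1 = (i + 1 < m) ? i + 1 : m;       /* min(i+1, m)   */
        for (uint64_t j = j0; j < j1; j++) {
            unsigned __int128 old = acc;
            acc += (unsigned __int128)x[j] * y[i - j];
            topc += (acc < old);                     /* 128-bit carry out */
        }
        unsigned __int128 old = acc;
        acc += z[i];
        topc += (acc < old);
        z[i] = (uint64_t)acc;                        /* write column digit */
        acc = (acc >> 64) | ((unsigned __int128)topc << 64);
        topc = 0;
    }
    return (uint64_t)acc;                            /* the "next" word */
}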
repo: wlsfx/bnbb | size: 2,319 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_ctz.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Count trailing zero bits
// Input x[k]; output function return
//
//    extern uint64_t bignum_ctz(uint64_t k, const uint64_t *x);
//
// In the case of a zero bignum as input the result is 64 * k
//
// In principle this has a precondition k < 2^58, but obviously that
// is always true in practice because of address space limitations
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_ctz)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_ctz)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_ctz)
        .text

#define k %rdi
#define x %rsi
#define i %rdx
#define w %rcx
#define a %rax

#define wshort %ecx

S2N_BN_SYMBOL(bignum_ctz):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// If the bignum is zero-length, just return 0

        xorq    %rax, %rax
        testq   k, k
        jz      Lbignum_ctz_end

// Use w = a[i-1] to store nonzero words in a top-down sweep
// Set the initial default to be as if we had a 1 word directly above

        movq    k, i
        incq    i
        movl    $1, wshort
Lbignum_ctz_loop:
        movq    -8(x,k,8), a
        testq   a, a
        cmovneq k, i
        cmovneq a, w
        decq    k
        jnz     Lbignum_ctz_loop

// Now w = a[i-1] is the lowest nonzero word, or in the zero case the
// default of the "extra" 1 = a[k]. We now want 64*(i-1) + ctz(w).
// Note that this code does not rely on the behavior of the BSF instruction
// for zero inputs, which is undefined according to the manual.

        decq    i
        shlq    $6, i
        bsfq    w, %rax
        addq    i, %rax

Lbignum_ctz_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_ctz)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
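Semantically the routine returns 64*(i-1) + ctz(w) for the lowest nonzero word w = x[i-1], and 64*k for a zero input. A simple C model of that contract (a sketch with a hypothetical name; it branches and is not constant-time like the sweep above):

#include <stdint.h>

/* Reference model: trailing zero bit count of x, 64*k when x == 0. */
uint64_t ref_ctz(uint64_t k, const uint64_t *x) {
    for (uint64_t i = 0; i < k; i++) {
        if (x[i] != 0) {
            uint64_t w = x[i], b = 0;
            while (!(w & 1)) { w >>= 1; b++; }  /* ctz of lowest nonzero word */
            return 64 * i + b;
        }
    }
    return 64 * k;                              /* zero bignum */
}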
repo: wlsfx/bnbb | size: 1,546 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_nonzero.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Test bignum for nonzero-ness x =/= 0
// Input x[k]; output function return
//
//    extern uint64_t bignum_nonzero(uint64_t k, const uint64_t *x);
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_nonzero)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_nonzero)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_nonzero)
        .text

#define a %rax
#define k %rdi
#define x %rsi

S2N_BN_SYMBOL(bignum_nonzero):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

        xorq    a, a
        testq   k, k
        jz      Lbignum_nonzero_end

Lbignum_nonzero_loop:
        orq     -8(x,k,8), a
        decq    k
        jnz     Lbignum_nonzero_loop

// Set a standard C condition based on whether a is nonzero

        negq    a
        sbbq    a, a
        negq    a

Lbignum_nonzero_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_nonzero)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
repo: wlsfx/bnbb | size: 3,258 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_bitfield.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Select bitfield starting at bit n with length l <= 64
// Inputs x[k], n, l; output function return
//
//    extern uint64_t bignum_bitfield(uint64_t k, const uint64_t *x, uint64_t n,
//                                    uint64_t l);
//
// One-word bitfield from a k-digit (digit=64 bits) bignum, in constant-time
// style. Bitfield starts at bit n and has length l, indexing from 0 (=LSB).
// Digits above the top are treated uniformly as zero, as usual. Since the
// result is returned in a single word, effectively we use l' = min(64,l)
// for the length.
//
// Standard x86-64 ABI: RDI = k, RSI = x, RDX = n, RCX = l, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, R8 = n, R9 = l, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_bitfield)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_bitfield)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_bitfield)
        .text

#define k %rdi
#define x %rsi
#define n %rdx
#define l %rcx

#define d %r8
#define e %rax
#define i %r9
#define a %r10
#define m %r11

#define mshort %r11d

S2N_BN_SYMBOL(bignum_bitfield):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
#endif

// Initialize second of digit pair to zero and if length is zero finish
// immediately; the digit e is also the return value in RAX

        xorq    e, e
        testq   k, k
        jz      Lbignum_bitfield_end

// Decompose the index into n = 64 * n + m, then increment n for next part

        movl    $63, mshort
        andq    n, m
        shrq    $6, n
        incq    n

// Run over the digits setting d = n'th and e = (n+1)'th

        xorq    i, i
Lbignum_bitfield_loop:
        movq    (x,i,8), a
        cmpq    n, i
        cmovcq  a, d
        cmovzq  a, e
        incq    i
        cmpq    k, i
        jc      Lbignum_bitfield_loop

// Put zero in a register, for several purposes

        xorq    a, a

// Override d with 0 if we ran off the end (e will retain original 0).

        cmpq    n, i
        cmovcq  a, d

// Override e if we have m = 0 (i.e. original n was divisible by 64)
// This is because then we want to shift it right by 64 below.

        testq   m, m
        cmovzq  a, e

// Create a size-l bitmask first (while the shift is conveniently in CL)

        cmpq    $64, l
        adcq    a, a
        shlq    %cl, a
        decq    a

// Combine shifted digits to get the bitfield(n,64)

        movq    m, l
        shrq    %cl, d
        negq    %rcx
        shlq    %cl, e
        orq     d, e

// Now mask it down to get bitfield (n,l)

        andq    a, e

Lbignum_bitfield_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_bitfield)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
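A plain C model of the documented selection, i.e. bits [n, n + min(l,64)) of x with digits above the top read as zero (a sketch with a hypothetical name; it branches, so it is not constant-time like the code above):

#include <stdint.h>

/* Reference model: the bitfield of x starting at bit n, length min(l,64). */
uint64_t ref_bitfield(uint64_t k, const uint64_t *x, uint64_t n, uint64_t l) {
    uint64_t dig = n >> 6, bit = n & 63;
    uint64_t d = (dig < k) ? x[dig] : 0;          /* n'th digit, 0 off the end */
    uint64_t e = (dig + 1 < k) ? x[dig + 1] : 0;  /* (n+1)'th digit            */
    uint64_t w = d >> bit;
    if (bit != 0)                                 /* avoid UB of a 64-bit shift */
        w |= e << (64 - bit);
    uint64_t mask = (l < 64) ? (((uint64_t)1 << l) - 1) : ~(uint64_t)0;
    return w & mask;
}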
repo: wlsfx/bnbb | size: 1,443 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_even.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Test bignum for even-ness
// Input x[k]; output function return
//
//    extern uint64_t bignum_even(uint64_t k, const uint64_t *x);
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_even)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_even)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_even)
        .text

S2N_BN_SYMBOL(bignum_even):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Set default return value of 1 and finish if k = 0 (trivially even)

        movl    $1, %eax
        testq   %rdi, %rdi
        jz      Lbignum_even_end

// Otherwise XOR that initial 1 with the lowest bit of the input

        xorq    (%rsi), %rax
        andq    $1, %rax

Lbignum_even_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_even)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
repo: wlsfx/bnbb | size: 1,793 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_ctd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Count trailing zero digits (64-bit words)
// Input x[k]; output function return
//
//    extern uint64_t bignum_ctd(uint64_t k, const uint64_t *x);
//
// In the case of a zero bignum as input the result is k
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_ctd)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_ctd)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_ctd)
        .text

#define k %rdi
#define x %rsi
#define i %rdx
#define a %rax

S2N_BN_SYMBOL(bignum_ctd):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// If the bignum is zero-length, just return 0

        xorq    %rax, %rax
        testq   k, k
        jz      Lbignum_ctd_end

// Record in i that the lowest nonzero word is i - 1, where i = k + 1 means
// that the bignum was entirely zero

        movq    k, i
        incq    i
Lbignum_ctd_loop:
        movq    -8(x,k,8), a
        testq   a, a
        cmovneq k, i
        decq    k
        jnz     Lbignum_ctd_loop

// We now want to return i - 1

        decq    i
        movq    i, %rax

Lbignum_ctd_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_ctd)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
repo: wlsfx/bnbb | size: 2,020 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_muladd10.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply bignum by 10 and add word: z := 10 * z + d
// Inputs z[k], d; outputs function return (carry) and z[k]
//
//    extern uint64_t bignum_muladd10(uint64_t k, uint64_t *z, uint64_t d);
//
// Although typically the input d < 10, this is not actually required.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = d, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = d, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_muladd10)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_muladd10)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_muladd10)
        .text

#define k %rdi
#define z %rsi
#define d %rcx

#define a %rax
#define l %rax
#define h %rdx

#define i %r8
#define ten %r9
#define tenshort %r9d

S2N_BN_SYMBOL(bignum_muladd10):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

// Move carry input to permanent home, and if k = 0 skip the main loop

        movq    %rdx, d
        testq   k, k
        jz      Lbignum_muladd10_end

// Simple loop

        xorq    i, i
        movl    $10, tenshort
Lbignum_muladd10_loop:
        movq    (z,i,8), a
        mulq    ten
        addq    d, l
        movq    l, (z,i,8)
        adcq    $0, h
        movq    h, d
        incq    i
        cmpq    k, i
        jc      Lbignum_muladd10_loop

// Return the final carry

Lbignum_muladd10_end:
        movq    d, %rax
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_muladd10)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
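The contract z := 10*z + d is exactly what is needed to build a bignum from decimal digits. A C model of it (a sketch with a hypothetical name, assuming unsigned __int128), with the typical usage pattern in a trailing comment:

#include <stdint.h>

/* Reference model: z := 10 * z + d over k words, returning the carry-out. */
uint64_t ref_muladd10(uint64_t k, uint64_t *z, uint64_t d) {
    uint64_t carry = d;
    for (uint64_t i = 0; i < k; i++) {
        unsigned __int128 t = (unsigned __int128)z[i] * 10 + carry;
        z[i] = (uint64_t)t;
        carry = (uint64_t)(t >> 64);
    }
    return carry;
}

/* Typical use, parsing a decimal string s into a zeroed k-word z:
   for (const char *q = s; *q; q++) ref_muladd10(k, z, (uint64_t)(*q - '0')); */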
repo: wlsfx/bnbb | size: 19,014 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_modexp.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Modular exponentiation for arbitrary odd modulus
// Inputs a[k], p[k], m[k]; output z[k], temporary buffer t[>=3*k]
//
//    extern void bignum_modexp
//      (uint64_t k, uint64_t *z, const uint64_t *a, const uint64_t *p,
//       const uint64_t *m, uint64_t *t);
//
// Does z := (a^p) mod m where all numbers are k-digit and m is odd
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = a, RCX = p, R8 = m, R9 = t
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = a, R9 = p, [RSP+40] = m, [RSP+48] = t
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_modexp)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_modexp)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_modexp)
        .text

// Local variables, all kept on the stack

#define k (%rsp)
#define res 8(%rsp)
#define a 16(%rsp)
#define p 24(%rsp)
#define m 32(%rsp)
#define x 40(%rsp)
#define i 48(%rsp)
#define y 56(%rsp)
#define z 64(%rsp)
#define VARSIZE 72

S2N_BN_SYMBOL(bignum_modexp):
        CFI_START
        _CET_ENDBR

// The Windows version literally calls the standard ABI version.
// This simplifies the proofs since subroutine offsets are fixed.

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
        movq    64(%rsp), %r9
        CFI_CALL(Lbignum_modexp_standard)
        CFI_POP(%rsi)
        CFI_POP(%rdi)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_modexp)

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lbignum_modexp_standard)
Lbignum_modexp_standard:
        CFI_START
#endif

// Real start of the standard ABI code.
// Bump down the stack to make room for local variables

        CFI_DEC_RSP(VARSIZE)

// If size is zero (which falsifies the oddness condition) do nothing

        testq   %rdi, %rdi
        jz      Lbignum_modexp_end

// Set up local variables based on input parameters

        movq    %rdi, k
        movq    %rsi, res
        movq    %rdx, a
        movq    %rcx, p
        movq    %r8, m
        movq    %r9, x
        leaq    (%r9,%rdi,8), %rax
        movq    %rax, y
        leaq    (%rax,%rdi,8), %rax
        movq    %rax, z

// Let x == 2^64k * a (mod m) and initialize z == 2^64k * 1 (mod m)

        movq    k, %rdi
        movq    z, %rsi
        movq    m, %rdx
        movq    y, %rcx
        CFI_CALL(Lbignum_modexp_local_amontifier)

        movq    k, %rdi
        movq    x, %rsi
        movq    z, %rdx
        movq    a, %rcx
        movq    m, %r8
        CFI_CALL(Lbignum_modexp_local_amontmul)

        movq    k, %rdi
        movq    z, %rsi
        movq    z, %rdx
        movq    m, %rcx
        CFI_CALL(Lbignum_modexp_local_demont)

// Main loop with z == 2^64k * a^(p >> i) (mod m)

        movq    k, %rax
        shlq    $6, %rax
        movq    %rax, i
Lbignum_modexp_loop:
        subq    $1, %rax
        movq    %rax, i

        movq    k, %rdi
        movq    y, %rsi
        movq    z, %rdx
        movq    z, %rcx
        movq    m, %r8
        CFI_CALL(Lbignum_modexp_local_amontmul)

        movq    k, %rdi
        movq    z, %rsi
        movq    x, %rdx
        movq    y, %rcx
        movq    m, %r8
        CFI_CALL(Lbignum_modexp_local_amontmul)

        movq    i, %rdx
        movq    %rdx, %rcx
        shrq    $6, %rdx
        movq    p, %rsi
        movq    (%rsi,%rdx,8), %rdi
        shrq    %cl, %rdi
        andq    $1, %rdi

        movq    k, %rsi
        movq    z, %rdx
        movq    z, %rcx
        movq    y, %r8
        CFI_CALL(Lbignum_modexp_local_mux)

        movq    i, %rax
        testq   %rax, %rax
        jnz     Lbignum_modexp_loop

// Convert back from Montgomery representation and copy the result
// (via a degenerate case of multiplexing) into the output buffer

        movq    k, %rdi
        movq    z, %rsi
        movq    z, %rdx
        movq    m, %rcx
        CFI_CALL(Lbignum_modexp_local_demont)

        xorl    %edi, %edi
        movq    k, %rsi
        movq    res, %rdx
        movq    z, %rcx
        movq    z, %r8
        CFI_CALL(Lbignum_modexp_local_mux)

// Restore the stack pointer and return

Lbignum_modexp_end:
        CFI_INC_RSP(VARSIZE)
        CFI_RET

#if WINDOWS_ABI
S2N_BN_SIZE_DIRECTIVE(Lbignum_modexp_standard)
#else
S2N_BN_SIZE_DIRECTIVE(bignum_modexp)
#endif

// Local copy of bignum_amontifier

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lbignum_modexp_local_amontifier)
Lbignum_modexp_local_amontifier:
        CFI_START
        CFI_PUSH(%rbp)
        CFI_PUSH(%rbx)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        movq    %rdx, %r12
        movq    %rcx, %r13
        testq   %rdi, %rdi
        je      Lbignum_modexp_amontifier_end
        xorq    %rbx, %rbx
Lbignum_modexp_copyinloop:
        movq    (%r12,%rbx,8), %rcx
        movq    %rcx, (%r13,%rbx,8)
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_copyinloop
        movq    %rdi, %rbx
        decq    %rbx
        je      Lbignum_modexp_normalized
Lbignum_modexp_normloop:
        xorq    %rbp, %rbp
        movq    %rdi, %r11
        negq    %rcx
        movl    $0x0, %eax
Lbignum_modexp_shufloop:
        movq    %rax, %rcx
        movq    (%r13,%rbp,8), %rax
        cmovbq  %rax, %rcx
        movq    %rcx, (%r13,%rbp,8)
        incq    %rbp
        decq    %r11
        jne     Lbignum_modexp_shufloop
        decq    %rbx
        jne     Lbignum_modexp_normloop
Lbignum_modexp_normalized:
        bsrq    %rcx, %rcx
        xorq    $0x3f, %rcx
        xorq    %r9, %r9
        xorq    %rbx, %rbx
Lbignum_modexp_bitloop:
        movq    (%r13,%rbx,8), %rax
        movq    %rax, %rbp
        shldq   %cl, %r9, %rax
        movq    %rax, (%r13,%rbx,8)
        movq    %rbp, %r9
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_bitloop
        movq    -0x8(%r13,%rdi,8), %r11
        movl    $0x1, %r8d
        movq    %r11, %r9
        negq    %r9
        movl    $0x3e, %ebx
Lbignum_modexp_estloop:
        addq    %r8, %r8
        movq    %r11, %rax
        subq    %r9, %rax
        cmpq    %rax, %r9
        sbbq    %rax, %rax
        notq    %rax
        subq    %rax, %r8
        addq    %r9, %r9
        andq    %r11, %rax
        subq    %rax, %r9
        decq    %rbx
        jne     Lbignum_modexp_estloop
        incq    %r9
        cmpq    %r9, %r11
        adcq    $0x0, %r8
        xorq    %rcx, %rcx
        xorq    %rbx, %rbx
Lbignum_modexp_mulloop:
        movq    (%r13,%rbx,8), %rax
        mulq    %r8
        addq    %rcx, %rax
        adcq    $0x0, %rdx
        movq    %rax, (%rsi,%rbx,8)
        movq    %rdx, %rcx
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_mulloop
        movabs  $0x4000000000000000, %rax
        subq    %rax, %rcx
        sbbq    %r8, %r8
        notq    %r8
        xorq    %rcx, %rcx
        xorq    %rbx, %rbx
Lbignum_modexp_remloop:
        movq    (%r13,%rbx,8), %rax
        andq    %r8, %rax
        negq    %rcx
        sbbq    (%rsi,%rbx,8), %rax
        sbbq    %rcx, %rcx
        movq    %rax, (%rsi,%rbx,8)
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_remloop
        xorq    %rcx, %rcx
        xorq    %rbp, %rbp
        xorq    %r9, %r9
Lbignum_modexp_dubloop1:
        movq    (%rsi,%rbp,8), %rax
        shrdq   $0x3f, %rax, %rcx
        negq    %r9
        sbbq    (%r13,%rbp,8), %rcx
        sbbq    %r9, %r9
        movq    %rcx, (%rsi,%rbp,8)
        movq    %rax, %rcx
        incq    %rbp
        cmpq    %rdi, %rbp
        jb      Lbignum_modexp_dubloop1
        shrq    $0x3f, %rcx
        addq    %r9, %rcx
        xorq    %rbp, %rbp
        xorq    %r9, %r9
Lbignum_modexp_corrloop1:
        movq    (%r13,%rbp,8), %rax
        andq    %rcx, %rax
        negq    %r9
        adcq    (%rsi,%rbp,8), %rax
        sbbq    %r9, %r9
        movq    %rax, (%rsi,%rbp,8)
        incq    %rbp
        cmpq    %rdi, %rbp
        jb      Lbignum_modexp_corrloop1
        xorq    %rcx, %rcx
        xorq    %rbp, %rbp
        xorq    %r9, %r9
Lbignum_modexp_dubloop2:
        movq    (%rsi,%rbp,8), %rax
        shrdq   $0x3f, %rax, %rcx
        negq    %r9
        sbbq    (%r13,%rbp,8), %rcx
        sbbq    %r9, %r9
        movq    %rcx, (%rsi,%rbp,8)
        movq    %rax, %rcx
        incq    %rbp
        cmpq    %rdi, %rbp
        jb      Lbignum_modexp_dubloop2
        shrq    $0x3f, %rcx
        addq    %r9, %rcx
        xorq    %rbp, %rbp
        xorq    %r9, %r9
Lbignum_modexp_corrloop2:
        movq    (%r13,%rbp,8), %rax
        andq    %rcx, %rax
        negq    %r9
        adcq    (%rsi,%rbp,8), %rax
        sbbq    %r9, %r9
        movq    %rax, (%rsi,%rbp,8)
        movq    %rax, (%r13,%rbp,8)
        incq    %rbp
        cmpq    %rdi, %rbp
        jb      Lbignum_modexp_corrloop2
        xorq    %r11, %r11
        movq    %rdi, %rbx
Lbignum_modexp_modloop:
        xorq    %r9, %r9
        movq    %rdi, %r8
        xorq    %rbp, %rbp
        xorq    %rcx, %rcx
Lbignum_modexp_cmaloop:
        adcq    %r9, %rcx
        sbbq    %r10, %r10
        movq    (%rsi,%rbp,8), %rax
        mulq    %r11
        subq    %r10, %rdx
        addq    %rcx, %rax
        movq    (%r13,%rbp,8), %r9
        movq    %rax, (%r13,%rbp,8)
        movq    %rdx, %rcx
        incq    %rbp
        decq    %r8
        jne     Lbignum_modexp_cmaloop
        adcq    %rcx, %r9
        movq    %r9, %r11
        sbbq    %r10, %r10
        xorq    %rbp, %rbp
        xorq    %rcx, %rcx
Lbignum_modexp_oaloop:
        movq    (%r13,%rbp,8), %rax
        movq    (%rsi,%rbp,8), %r9
        andq    %r10, %r9
        negq    %rcx
        adcq    %r9, %rax
        sbbq    %rcx, %rcx
        movq    %rax, (%r13,%rbp,8)
        incq    %rbp
        cmpq    %rdi, %rbp
        jb      Lbignum_modexp_oaloop
        subq    %rcx, %r11
        decq    %rbx
        jne     Lbignum_modexp_modloop
        movq    (%r12), %rax
        movq    %rax, %rcx
        movq    %rax, %r9
        shlq    $0x2, %rcx
        subq    %rcx, %r9
        xorq    $0x2, %r9
        movq    %r9, %rcx
        imulq   %rax, %rcx
        movl    $0x2, %eax
        addq    %rcx, %rax
        addq    $0x1, %rcx
        imulq   %rax, %r9
        imulq   %rcx, %rcx
        movl    $0x1, %eax
        addq    %rcx, %rax
        imulq   %rax, %r9
        imulq   %rcx, %rcx
        movl    $0x1, %eax
        addq    %rcx, %rax
        imulq   %rax, %r9
        imulq   %rcx, %rcx
        movl    $0x1, %eax
        addq    %rcx, %rax
        imulq   %rax, %r9
        movq    (%r13), %rcx
        imulq   %rcx, %r9
        movq    (%r12), %rax
        mulq    %r9
        addq    %rcx, %rax
        movq    %rdx, %rcx
        movl    $0x1, %ebp
        movq    %rdi, %r8
        decq    %r8
        je      Lbignum_modexp_montifend
Lbignum_modexp_montifloop:
        adcq    (%r13,%rbp,8), %rcx
        sbbq    %r10, %r10
        movq    (%r12,%rbp,8), %rax
        mulq    %r9
        subq    %r10, %rdx
        addq    %rcx, %rax
        movq    %rax, -0x8(%r13,%rbp,8)
        movq    %rdx, %rcx
        incq    %rbp
        decq    %r8
        jne     Lbignum_modexp_montifloop
Lbignum_modexp_montifend:
        adcq    %rcx, %r11
        sbbq    %r10, %r10
        movq    %r11, -0x8(%r13,%rdi,8)
        xorq    %rbp, %rbp
        xorq    %rcx, %rcx
Lbignum_modexp_osloop:
        movq    (%r13,%rbp,8), %rax
        movq    (%r12,%rbp,8), %r9
        andq    %r10, %r9
        negq    %rcx
        sbbq    %r9, %rax
        sbbq    %rcx, %rcx
        movq    %rax, (%rsi,%rbp,8)
        incq    %rbp
        cmpq    %rdi, %rbp
        jb      Lbignum_modexp_osloop
Lbignum_modexp_amontifier_end:
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbx)
        CFI_POP(%rbp)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lbignum_modexp_local_amontifier)

// Local copy of bignum_amontmul

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lbignum_modexp_local_amontmul)
Lbignum_modexp_local_amontmul:
        CFI_START
        CFI_PUSH(%rbx)
        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)
        CFI_PUSH(%r15)
        CFI_DEC_RSP(8)
        testq   %rdi, %rdi
        je      Lbignum_modexp_amont_end
        movq    %rdx, %r9
        movq    (%r8), %rax
        movq    %rax, %rdx
        movq    %rax, %rbx
        shlq    $0x2, %rdx
        subq    %rdx, %rbx
        xorq    $0x2, %rbx
        movq    %rbx, %rdx
        imulq   %rax, %rdx
        movl    $0x2, %eax
        addq    %rdx, %rax
        addq    $0x1, %rdx
        imulq   %rax, %rbx
        imulq   %rdx, %rdx
        movl    $0x1, %eax
        addq    %rdx, %rax
        imulq   %rax, %rbx
        imulq   %rdx, %rdx
        movl    $0x1, %eax
        addq    %rdx, %rax
        imulq   %rax, %rbx
        imulq   %rdx, %rdx
        movl    $0x1, %eax
        addq    %rdx, %rax
        imulq   %rax, %rbx
        movq    %rbx, (%rsp)
        xorq    %r13, %r13
        xorq    %rbx, %rbx
Lbignum_modexp_zoop:
        movq    %r13, (%rsi,%rbx,8)
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_zoop
        xorq    %r14, %r14
Lbignum_modexp_outeramontloop:
        movq    (%r9,%r13,8), %rbp
        xorq    %rbx, %rbx
        xorq    %r10, %r10
        xorq    %r15, %r15
        movq    %rdi, %r12
Lbignum_modexp_maddloop:
        adcq    (%rsi,%rbx,8), %r10
        sbbq    %r11, %r11
        movq    (%rcx,%rbx,8), %rax
        mulq    %rbp
        subq    %r11, %rdx
        addq    %r10, %rax
        movq    %rax, (%rsi,%rbx,8)
        movq    %rdx, %r10
        incq    %rbx
        decq    %r12
        jne     Lbignum_modexp_maddloop
        adcq    %r10, %r14
        adcq    %r15, %r15
        movq    (%rsi), %r11
        movq    (%rsp), %rbp
        imulq   %r11, %rbp
        movq    (%r8), %rax
        mulq    %rbp
        addq    %r11, %rax
        movq    %rdx, %r10
        movl    $0x1, %ebx
        movq    %rdi, %r12
        decq    %r12
        je      Lbignum_modexp_montend
Lbignum_modexp_montloop:
        adcq    (%rsi,%rbx,8), %r10
        sbbq    %r11, %r11
        movq    (%r8,%rbx,8), %rax
        mulq    %rbp
        subq    %r11, %rdx
        addq    %r10, %rax
        movq    %rax, -0x8(%rsi,%rbx,8)
        movq    %rdx, %r10
        incq    %rbx
        decq    %r12
        jne     Lbignum_modexp_montloop
Lbignum_modexp_montend:
        adcq    %r14, %r10
        adcq    $0x0, %r15
        movq    %r15, %r14
        movq    %r10, -0x8(%rsi,%rbx,8)
        incq    %r13
        cmpq    %rdi, %r13
        jb      Lbignum_modexp_outeramontloop
        xorq    %rbp, %rbp
        subq    %r14, %rbp
        xorq    %r11, %r11
        xorq    %rbx, %rbx
Lbignum_modexp_acorrloop:
        movq    (%r8,%rbx,8), %rax
        andq    %rbp, %rax
        negq    %r11
        sbbq    %rax, (%rsi,%rbx,8)
        sbbq    %r11, %r11
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_acorrloop
Lbignum_modexp_amont_end:
        CFI_INC_RSP(8)
        CFI_POP(%r15)
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbp)
        CFI_POP(%rbx)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lbignum_modexp_local_amontmul)

// Local copy of bignum_demont

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lbignum_modexp_local_demont)
Lbignum_modexp_local_demont:
        CFI_START
        CFI_PUSH(%rbx)
        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)
        testq   %rdi, %rdi
        je      Lbignum_modexp_demont_end
        movq    (%rcx), %rax
        movq    %rax, %rbx
        movq    %rax, %r8
        shlq    $0x2, %rbx
        subq    %rbx, %r8
        xorq    $0x2, %r8
        movq    %r8, %rbx
        imulq   %rax, %rbx
        movl    $0x2, %eax
        addq    %rbx, %rax
        addq    $0x1, %rbx
        imulq   %rax, %r8
        imulq   %rbx, %rbx
        movl    $0x1, %eax
        addq    %rbx, %rax
        imulq   %rax, %r8
        imulq   %rbx, %rbx
        movl    $0x1, %eax
        addq    %rbx, %rax
        imulq   %rax, %r8
        imulq   %rbx, %rbx
        movl    $0x1, %eax
        addq    %rbx, %rax
        imulq   %rax, %r8
        xorq    %rbx, %rbx
Lbignum_modexp_iloop:
        movq    (%rdx,%rbx,8), %rax
        movq    %rax, (%rsi,%rbx,8)
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_iloop
        xorq    %r9, %r9
Lbignum_modexp_outerdemontloop:
        movq    (%rsi), %r11
        movq    %r8, %rbp
        imulq   %r11, %rbp
        movq    (%rcx), %rax
        mulq    %rbp
        addq    %r11, %rax
        movq    %rdx, %r10
        movl    $0x1, %ebx
        movq    %rdi, %r12
        decq    %r12
        je      Lbignum_modexp_demontend
Lbignum_modexp_demontloop:
        adcq    (%rsi,%rbx,8), %r10
        sbbq    %r11, %r11
        movq    (%rcx,%rbx,8), %rax
        mulq    %rbp
        subq    %r11, %rdx
        addq    %r10, %rax
        movq    %rax, -0x8(%rsi,%rbx,8)
        movq    %rdx, %r10
        incq    %rbx
        decq    %r12
        jne     Lbignum_modexp_demontloop
Lbignum_modexp_demontend:
        adcq    $0x0, %r10
        movq    %r10, -0x8(%rsi,%rbx,8)
        incq    %r9
        cmpq    %rdi, %r9
        jb      Lbignum_modexp_outerdemontloop
        xorq    %rbx, %rbx
        movq    %rdi, %r12
Lbignum_modexp_cmploop:
        movq    (%rsi,%rbx,8), %rax
        sbbq    (%rcx,%rbx,8), %rax
        incq    %rbx
        decq    %r12
        jne     Lbignum_modexp_cmploop
        sbbq    %rbp, %rbp
        notq    %rbp
        xorq    %r11, %r11
        xorq    %rbx, %rbx
Lbignum_modexp_dcorrloop:
        movq    (%rcx,%rbx,8), %rax
        andq    %rbp, %rax
        negq    %r11
        sbbq    %rax, (%rsi,%rbx,8)
        sbbq    %r11, %r11
        incq    %rbx
        cmpq    %rdi, %rbx
        jb      Lbignum_modexp_dcorrloop
Lbignum_modexp_demont_end:
        CFI_POP(%r12)
        CFI_POP(%rbp)
        CFI_POP(%rbx)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lbignum_modexp_local_demont)

// Local copy of bignum_mux

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lbignum_modexp_local_mux)
Lbignum_modexp_local_mux:
        CFI_START
        testq   %rsi, %rsi
        je      Lbignum_modexp_muxend
        xorq    %r9, %r9
        negq    %rdi
Lbignum_modexp_muxloop:
        movq    (%rcx,%r9,8), %rax
        movq    (%r8,%r9,8), %rdi
        cmovae  %rdi, %rax
        movq    %rax, (%rdx,%r9,8)
        incq    %r9
        decq    %rsi
        jne     Lbignum_modexp_muxloop
Lbignum_modexp_muxend:
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lbignum_modexp_local_mux)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
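Stripped of the ABI and stack plumbing, the control flow above is: amontify, then one Montgomery squaring and one Montgomery multiplication per exponent bit with a constant-time select, then demontify. A C sketch of that flow, assuming hypothetical helpers amontifier/amontmul/demont/mux with the same argument order as the local subroutines (this is an illustration of the structure, not the library's API):

#include <stdint.h>

/* Hypothetical helpers mirroring the local subroutines above. */
void amontifier(uint64_t k, uint64_t *z, const uint64_t *m, uint64_t *t);
void amontmul(uint64_t k, uint64_t *z, const uint64_t *x, const uint64_t *y,
              const uint64_t *m);
void demont(uint64_t k, uint64_t *z, const uint64_t *x, const uint64_t *m);
void mux(uint64_t p, uint64_t k, uint64_t *z, const uint64_t *x,
         const uint64_t *y);

void ref_modexp(uint64_t k, uint64_t *res, const uint64_t *a,
                const uint64_t *p, const uint64_t *m, uint64_t *t) {
    uint64_t *x = t, *y = t + k, *z = t + 2 * k;
    amontifier(k, z, m, y);           /* z == 2^{128k} (mod m)        */
    amontmul(k, x, z, a, m);          /* x == 2^{64k} * a (mod m)     */
    demont(k, z, z, m);               /* z == 2^{64k} * 1 (mod m)     */
    for (uint64_t i = 64 * k; i-- > 0; ) {
        amontmul(k, y, z, z, m);      /* y := z^2 / 2^{64k}           */
        amontmul(k, z, x, y, m);      /* z := x * y / 2^{64k}         */
        uint64_t bit = (p[i >> 6] >> (i & 63)) & 1;
        mux(bit, k, z, z, y);         /* keep the multiply iff bit set */
    }
    demont(k, z, z, m);               /* strip the Montgomery factor  */
    mux(0, k, res, z, z);             /* degenerate mux = copy out    */
}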
repo: wlsfx/bnbb | size: 1,939 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_mux.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiplex/select z := x (if p nonzero) or z := y (if p zero)
// Inputs p, x[k], y[k]; output z[k]
//
//    extern void bignum_mux(uint64_t p, uint64_t k, uint64_t *z, const uint64_t *x,
//                           const uint64_t *y);
//
// It is assumed that all numbers x, y and z have the same size k digits.
//
// Standard x86-64 ABI: RDI = p, RSI = k, RDX = z, RCX = x, R8 = y
// Microsoft x64 ABI:   RCX = p, RDX = k, R8 = z, R9 = x, [RSP+40] = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mux)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mux)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mux)
        .text

#define b %rdi
#define k %rsi
#define z %rdx
#define x %rcx
#define y %r8
#define i %r9
#define a %rax

S2N_BN_SYMBOL(bignum_mux):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

        testq   k, k
        jz      Lbignum_mux_end         // If length = 0 do nothing

        xorq    i, i
        negq    b                       // CF <=> (b != 0)

Lbignum_mux_loop:
        movq    (x,i,8), a
        movq    (y,i,8), b
        cmovncq b, a                    // CF ? a : b
        movq    a, (z,i,8)
        incq    i
        decq    k
        jnz     Lbignum_mux_loop

Lbignum_mux_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mux)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
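The documented semantics are just a k-word select. A C model using the same mask idea (a sketch with a hypothetical name; the assembly instead captures the condition in CF once and relies on mov/cmov/inc/dec leaving CF untouched across the whole loop):

#include <stdint.h>

/* Reference model: z := p ? x : y, word by word. */
void ref_mux(uint64_t p, uint64_t k, uint64_t *z,
             const uint64_t *x, const uint64_t *y) {
    uint64_t mask = p ? ~(uint64_t)0 : 0;
    for (uint64_t i = 0; i < k; i++)
        z[i] = (x[i] & mask) | (y[i] & ~mask);
}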
repo: wlsfx/bnbb | size: 4,218 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_emontredc.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Extended Montgomery reduce, returning results in input-output buffer
// Inputs z[2*k], m[k], w; outputs function return (extra result bit) and z[2*k]
//
//    extern uint64_t bignum_emontredc(uint64_t k, uint64_t *z, const uint64_t *m,
//                                     uint64_t w);
//
// Assumes that z initially holds a 2k-digit bignum z_0, m is a k-digit odd
// bignum and m * w == -1 (mod 2^64). This function also uses z for the output
// as well as returning a carry c of 0 or 1. This encodes two numbers: in the
// lower half of the z buffer we have q = z[0..k-1], while the upper half
// together with the carry gives r = 2^{64k}*c + z[k..2k-1]. These values
// satisfy z_0 + q * m = 2^{64k} * r, i.e. r gives a raw (unreduced) Montgomery
// reduction while q gives the multiplier that was used. Another way of
// thinking of it is that if z' is the output z with the lower half replaced
// with zeros, then z_0 + q * m = 2^{128k} * c + z'.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = m, RCX = w, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = m, R9 = w, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_emontredc)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_emontredc)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_emontredc)
        .text

// Argument m comes in in %rdx but we copy it to %r8

#define k %rdi
#define z %rsi
#define m %r8
#define w %rcx

// General temp, low part of product and mul input

#define a %rax

// General temp, High part of product

#define b %rdx

// Home for i'th digit or Montgomery multiplier

#define d %rbx

// Outer loop counter

#define i %r9

// Inner loop counter

#define j %r10

#define h %r11
#define e %r12
#define t %r13
#define c %r14

#define cshort %r14d
#define jshort %r10d

S2N_BN_SYMBOL(bignum_emontredc):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
#endif

// Save registers

        CFI_PUSH(%rbx)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)

// Initialize top carry to zero immediately to catch the k = 0 case

        xorq    c, c

// If k = 0 the whole operation is trivial

        testq   k, k
        jz      Lbignum_emontredc_end

// Move m into its permanent home since we need RDX for muls

        movq    %rdx, m

// Launch into the outer loop

        xorq    i, i
Lbignum_emontredc_outerloop:
        movq    (z), e
        movq    w, d
        imulq   e, d
        movq    (m), a
        mulq    d
        movq    d, (z)
        addq    e, a            // Will be zero but want the carry
        movq    %rdx, h
        movl    $1, jshort
        movq    k, t
        decq    t
        jz      Lbignum_emontredc_montend
Lbignum_emontredc_montloop:
        adcq    (z,j,8), h
        sbbq    e, e
        movq    (m,j,8), a
        mulq    d
        subq    e, %rdx
        addq    h, a
        movq    a, (z,j,8)
        movq    %rdx, h
        incq    j
        decq    t
        jnz     Lbignum_emontredc_montloop
Lbignum_emontredc_montend:
        adcq    c, h
        movl    $0, cshort
        adcq    $0, c
        movq    (z,k,8), a
        addq    h, a
        movq    a, (z,k,8)
        adcq    $0, c

// End of outer loop.

        addq    $8, z           // For simple indexing, z pointer moves
        incq    i
        cmpq    k, i
        jc      Lbignum_emontredc_outerloop

Lbignum_emontredc_end:

// Put the top carry in the expected place, restore registers and return

        movq    c, %rax
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbx)
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_emontredc)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
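The header's relation z_0 + q * m = 2^{64k} * r pins down exactly what the two nested loops compute. A C model of the same word-at-a-time Montgomery pass (a sketch with a hypothetical name, assuming unsigned __int128; it requires m odd and w * m == -1 mod 2^64, as documented):

#include <stdint.h>

/* Reference model: extended Montgomery reduction of the 2k-word z in place.
   Lower half of z receives the multipliers q, upper half the reduction r;
   returns the extra top bit c. */
uint64_t ref_emontredc(uint64_t k, uint64_t *z, const uint64_t *m, uint64_t w) {
    uint64_t c = 0;
    for (uint64_t i = 0; i < k; i++) {
        uint64_t q = w * z[i];            /* multiplier killing word i */
        unsigned __int128 carry = 0;
        for (uint64_t j = 0; j < k; j++) {
            unsigned __int128 t =
                (unsigned __int128)q * m[j] + z[i + j] + carry;
            z[i + j] = (uint64_t)t;       /* becomes 0 on the j = 0 step */
            carry = t >> 64;
        }
        unsigned __int128 t =
            (unsigned __int128)z[i + k] + (uint64_t)carry + c;
        z[i + k] = (uint64_t)t;
        c = (uint64_t)(t >> 64);          /* running extra bit */
        z[i] = q;                         /* record the multiplier used */
    }
    return c;
}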
repo: wlsfx/bnbb | size: 1,829 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_cld.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Count leading zero digits (64-bit words)
// Input x[k]; output function return
//
//    extern uint64_t bignum_cld(uint64_t k, const uint64_t *x);
//
// In the case of a zero bignum as input the result is k
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cld)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cld)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cld)
        .text

#define k %rdi
#define x %rsi
#define i %rax
#define a %rcx
#define j %rdx

S2N_BN_SYMBOL(bignum_cld):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Initialize the index i and also prepare default return value of 0 (i = %rax)

        xorq    i, i

// If the bignum is zero-length, just return k = 0

        testq   k, k
        jz      Lbignum_cld_end

// Run over the words j = 0..k-1, and set i := j + 1 when hitting nonzero a[j]

        xorq    j, j
Lbignum_cld_loop:
        movq    (x,j,8), a
        incq    j
        testq   a, a
        cmovnzq j, i
        cmpq    k, j
        jnz     Lbignum_cld_loop

        negq    %rax
        addq    %rdi, %rax

Lbignum_cld_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cld)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
repo: wlsfx/bnbb | size: 13,329 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_amontifier.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Compute "amontification" constant z :== 2^{128k} (congruent mod m)
// Input m[k]; output z[k]; temporary buffer t[>=k]
//
//    extern void bignum_amontifier(uint64_t k, uint64_t *z, const uint64_t *m,
//                                  uint64_t *t);
//
// This is called "amontifier" because any other value x can now be mapped into
// the almost-Montgomery domain with an almost-Montgomery multiplication by z.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = m, RCX = t
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = m, R9 = t
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_amontifier)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_amontifier)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_amontifier)
        .text

#define k %rdi
#define z %rsi

// These two inputs get moved to different places since RCX and RDX are special

#define m %r12
#define t %r13

// Other variables
// Matters that c is RCX as CL=lo(c) is assumed in shifts

#define i %rbx
#define j %rbp
#define a %rax
#define c %rcx
#define h %r11
#define l %r10
#define b %r9
#define n %r8

// Some aliases for the values b and n

#define q %r8
#define r %r9

#define ashort %eax
#define ishort %ebx
#define jshort %ebp
#define qshort %r8d

S2N_BN_SYMBOL(bignum_amontifier):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
#endif

// Save some additional registers for use, copy args out of RCX and RDX

        CFI_PUSH(%rbp)
        CFI_PUSH(%rbx)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)

        movq    %rdx, m
        movq    %rcx, t

// If k = 0 the whole operation is trivial

        testq   k, k
        jz      Lbignum_amontifier_end

// Copy the input m into the temporary buffer t. The temporary register
// c matters since we want it to hold the highest digit, ready for the
// normalization phase.

        xorq    i, i
Lbignum_amontifier_copyinloop:
        movq    (m,i,8), c
        movq    c, (t,i,8)
        incq    i
        cmpq    k, i
        jc      Lbignum_amontifier_copyinloop

// Do a rather stupid but constant-time digit normalization, conditionally
// shifting left (k-1) times based on whether the top word is zero.
// With careful binary striding this could be O(k*log(k)) instead of O(k^2)
// while still retaining the constant-time style.
// The "neg c" sets the zeroness predicate (~CF) for the entire inner loop

        movq    k, i
        decq    i
        jz      Lbignum_amontifier_normalized
Lbignum_amontifier_normloop:
        xorq    j, j
        movq    k, h
        negq    c
        movl    $0, ashort
Lbignum_amontifier_shufloop:
        movq    a, c
        movq    (t,j,8), a
        cmovcq  a, c
        movq    c, (t,j,8)
        incq    j
        decq    h
        jnz     Lbignum_amontifier_shufloop
        decq    i
        jnz     Lbignum_amontifier_normloop

// We now have the top digit nonzero, assuming the input was nonzero,
// and as per the invariant of the loop above, c holds that digit. So
// now just count c's leading zeros and shift t bitwise that many bits.
// Note that we don't care about the result of bsr for zero inputs so
// the simple xor-ing with 63 is safe.

Lbignum_amontifier_normalized:

        bsrq    c, c
        xorq    $63, c

        xorq    b, b
        xorq    i, i
Lbignum_amontifier_bitloop:
        movq    (t,i,8), a
        movq    a, j
        shldq   %cl, b, a
        movq    a, (t,i,8)
        movq    j, b
        incq    i
        cmpq    k, i
        jc      Lbignum_amontifier_bitloop

// Let h be the high word of n, which in all the in-scope cases is >= 2^63.
// Now successively form q = 2^i div h and r = 2^i mod h as i goes from
// 64 to 126. We avoid just using division out of constant-time concerns
// (at the least we would need to fix up h = 0 for out-of-scope inputs) and
// don't bother with Newton-Raphson, since this stupid simple loop doesn't
// contribute much of the overall runtime at typical sizes.

        movq    -8(t,k,8), h
        movl    $1, qshort
        movq    h, r
        negq    r
        movl    $62, ishort
Lbignum_amontifier_estloop:
        addq    q, q
        movq    h, a
        subq    r, a
        cmpq    a, r            // CF <=> r < h - r <=> 2 * r < h
        sbbq    a, a
        notq    a               // a = bitmask(2 * r >= h)
        subq    a, q
        addq    r, r
        andq    h, a
        subq    a, r
        decq    i
        jnz     Lbignum_amontifier_estloop

// Strictly speaking the above loop doesn't quite give the true remainder
// and quotient in the special case r = h = 2^63, so fix it up. We get
// q = 2^63 - 1 and r = 2^63 and really want q = 2^63 and r = 0. This is
// supererogatory, because the main property of q used below still holds
// in this case unless the initial m = 1, and then anyway the overall
// specification (congruence modulo m) holds degenerately. But it seems
// nicer to get a "true" quotient and remainder.

        incq    r
        cmpq    r, h
        adcq    $0, q

// So now we have q and r with 2^126 = q * h + r (imagining r = 0 in the
// fixed-up case above: note that we never actually use the computed
// value of r below and so didn't adjust it). And we can assume the ranges
// q <= 2^63 and r < h < 2^64.
//
// The idea is to use q as a first quotient estimate for a remainder
// of 2^{p+62} mod n, where p = 64 * k. We have, splitting n into the
// high and low parts h and l:
//
// 2^{p+62} - q * n = 2^{p+62} - q * (2^{p-64} * h + l)
//                  = 2^{p+62} - (2^{p-64} * (q * h) + q * l)
//                  = 2^{p+62} - 2^{p-64} * (2^126 - r) - q * l
//                  = 2^{p-64} * r - q * l
//
// Note that 2^{p-64} * r < 2^{p-64} * h <= n
// and also  q * l < 2^63 * 2^{p-64} = 2^{p-1} <= n
// so |diff| = |2^{p-64} * r - q * l| < n.
//
// If in fact diff >= 0 then it is already 2^{p+62} mod n.
// otherwise diff + n is the right answer.
//
// To (maybe?) make the computation slightly easier we actually flip
// the sign and compute d = q * n - 2^{p+62}. Then the answer is either
// -d (when negative) or n - d; in either case we effectively negate d.
// This negating tweak in fact spoils the result for cases where
// 2^{p+62} mod n = 0, when we get n instead. However the only case
// where this can happen is m = 1, when the whole spec holds trivially,
// and actually the remainder of the logic below works anyway since
// the latter part of the code only needs a congruence for the k-digit
// result, not strict modular reduction (the doublings will maintain
// the non-strict inequality).

        xorq    c, c
        xorq    i, i
Lbignum_amontifier_mulloop:
        movq    (t,i,8), %rax
        mulq    q
        addq    c, %rax
        adcq    $0, %rdx
        movq    %rax, (z,i,8)
        movq    %rdx, c
        incq    i
        cmpq    k, i
        jc      Lbignum_amontifier_mulloop

// Now c is the high word of the product, so subtract 2^62
// and then turn it into a bitmask in q = h

        movq    $0x4000000000000000, %rax
        subq    a, c
        sbbq    q, q
        notq    q

// Now do [c] * n - d for our final answer

        xorq    c, c
        xorq    i, i
Lbignum_amontifier_remloop:
        movq    (t,i,8), a
        andq    q, a
        negq    c
        sbbq    (z,i,8), a
        sbbq    c, c
        movq    a, (z,i,8)
        incq    i
        cmpq    k, i
        jc      Lbignum_amontifier_remloop

// Now still need to do a couple of modular doublings to get us all the
// way up to 2^{p+64} == r from initial 2^{p+62} == r (mod n).

        xorq    c, c
        xorq    j, j
        xorq    b, b
Lbignum_amontifier_dubloop1:
        movq    (z,j,8), a
        shrdq   $63, a, c
        negq    b
        sbbq    (t,j,8), c
        sbbq    b, b
        movq    c, (z,j,8)
        movq    a, c
        incq    j
        cmpq    k, j
        jc      Lbignum_amontifier_dubloop1
        shrq    $63, c
        addq    b, c
        xorq    j, j
        xorq    b, b
Lbignum_amontifier_corrloop1:
        movq    (t,j,8), a
        andq    c, a
        negq    b
        adcq    (z,j,8), a
        sbbq    b, b
        movq    a, (z,j,8)
        incq    j
        cmpq    k, j
        jc      Lbignum_amontifier_corrloop1

// This is not exactly the same: we also copy output to t giving the
// initialization t_1 = r == 2^{p+64} mod n for the main loop next.

        xorq    c, c
        xorq    j, j
        xorq    b, b
Lbignum_amontifier_dubloop2:
        movq    (z,j,8), a
        shrdq   $63, a, c
        negq    b
        sbbq    (t,j,8), c
        sbbq    b, b
        movq    c, (z,j,8)
        movq    a, c
        incq    j
        cmpq    k, j
        jc      Lbignum_amontifier_dubloop2
        shrq    $63, c
        addq    b, c
        xorq    j, j
        xorq    b, b
Lbignum_amontifier_corrloop2:
        movq    (t,j,8), a
        andq    c, a
        negq    b
        adcq    (z,j,8), a
        sbbq    b, b
        movq    a, (z,j,8)
        movq    a, (t,j,8)
        incq    j
        cmpq    k, j
        jc      Lbignum_amontifier_corrloop2

// We then successively generate (k+1)-digit values satisfying
// t_i == 2^{p+64*i} mod n, each of which is stored in h::t. Finish
// initialization by zeroing h initially

        xorq    h, h

// Then if t_i = 2^{p} * h + l
// we have t_{i+1} == 2^64 * t_i
//                  = (2^{p+64} * h) + (2^64 * l)
//                 == r * h + l<<64
// Do this k more times so we end up == 2^{128*k+64}, one more than we want
//
// Writing B = 2^{64k}, the possible correction of adding r, which for
// a (k+1)-digit result is equivalent to subtracting q = 2^{64*(k+1)} - r
// would give the overall worst-case value minus q of
// [ B * (B^k - 1) + (B - 1) * r ] - [B^{k+1} - r]
// = B * (r - 1) < B^{k+1} so we keep inside k+1 digits as required.
//
// This implementation makes the shift implicit by starting b with the
// "previous" digit (initially 0) to offset things by 1.

        movq    k, i
Lbignum_amontifier_modloop:
        xorq    b, b
        movq    k, n
        xorq    j, j
        xorq    c, c
Lbignum_amontifier_cmaloop:
        adcq    b, c
        sbbq    l, l
        movq    (z,j,8), %rax
        mulq    h
        subq    l, %rdx
        addq    c, %rax
        movq    (t,j,8), b
        movq    %rax, (t,j,8)
        movq    %rdx, c
        incq    j
        decq    n
        jnz     Lbignum_amontifier_cmaloop
        adcq    c, b
        movq    b, h
        sbbq    l, l
        xorq    j, j
        xorq    c, c
Lbignum_amontifier_oaloop:
        movq    (t,j,8), a
        movq    (z,j,8), b
        andq    l, b
        negq    c
        adcq    b, a
        sbbq    c, c
        movq    a, (t,j,8)
        incq    j
        cmpq    k, j
        jc      Lbignum_amontifier_oaloop
        subq    c, h
        decq    i
        jnz     Lbignum_amontifier_modloop

// Now do one almost-Montgomery reduction w.r.t. the original m
// which lops off one 2^64 from the congruence and, with the usual
// almost-Montgomery correction, gets us back inside k digits for
// the end result.

        movq    (m), a
        movq    a, c
        movq    a, b
        shlq    $2, c
        subq    c, b
        xorq    $2, b
        movq    b, c
        imulq   a, c
        movl    $2, ashort
        addq    c, a
        addq    $1, c
        imulq   a, b
        imulq   c, c
        movl    $1, ashort
        addq    c, a
        imulq   a, b
        imulq   c, c
        movl    $1, ashort
        addq    c, a
        imulq   a, b
        imulq   c, c
        movl    $1, ashort
        addq    c, a
        imulq   a, b

        movq    (t), c
        imulq   c, b
        movq    (m), %rax
        mulq    b
        addq    c, %rax
        movq    %rdx, c
        movl    $1, jshort
        movq    k, n
        decq    n
        jz      Lbignum_amontifier_montend
Lbignum_amontifier_montloop:
        adcq    (t,j,8), c
        sbbq    l, l
        movq    (m,j,8), %rax
        mulq    b
        subq    l, %rdx
        addq    c, %rax
        movq    %rax, -8(t,j,8)
        movq    %rdx, c
        incq    j
        decq    n
        jnz     Lbignum_amontifier_montloop
Lbignum_amontifier_montend:
        adcq    c, h
        sbbq    l, l
        movq    h, -8(t,k,8)

        xorq    j, j
        xorq    c, c
Lbignum_amontifier_osloop:
        movq    (t,j,8), a
        movq    (m,j,8), b
        andq    l, b
        negq    c
        sbbq    b, a
        sbbq    c, c
        movq    a, (z,j,8)
        incq    j
        cmpq    k, j
        jc      Lbignum_amontifier_osloop

Lbignum_amontifier_end:
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbx)
        CFI_POP(%rbp)
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_amontifier)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
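What the constant is for: with z == 2^{128k} (mod m) in hand, a single almost-Montgomery multiplication maps any x into the almost-Montgomery domain, because that multiplication divides the product by 2^{64k}. A short C sketch of this use, with a hypothetical amontmul helper (not the library's declared API):

#include <stdint.h>

/* Hypothetical almost-Montgomery multiply: z := x * y / 2^{64k} (mod m). */
void amontmul(uint64_t k, uint64_t *z, const uint64_t *x, const uint64_t *y,
              const uint64_t *m);

/* Map x into the almost-Montgomery domain using the amontifier output z2:
   since z2 == 2^{128k} (mod m), x * z2 / 2^{64k} == x * 2^{64k} (mod m). */
void to_amont(uint64_t k, uint64_t *xm, const uint64_t *x,
              const uint64_t *z2, const uint64_t *m) {
    amontmul(k, xm, x, z2, m);
}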
repo: wlsfx/bnbb | size: 1,336 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_clz.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Count leading zero bits in a single word
// Input a; output function return
//
//    extern uint64_t word_clz(uint64_t a);
//
// Standard x86-64 ABI: RDI = a, returns RAX
// Microsoft x64 ABI:   RCX = a, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_clz)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_clz)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(word_clz)
        .text

S2N_BN_SYMBOL(word_clz):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
#endif

// First do %rax = 63 - bsr(a), which is right except (maybe) for zero inputs

        bsrq    %rdi, %rax
        xorq    $63, %rax

// Force return of 64 in the zero-input case

        movl    $64, %edx
        testq   %rdi, %rdi
        cmoveq  %rdx, %rax

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(word_clz)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
repo: wlsfx/bnbb | size: 3,762 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_sub.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Subtract, z := x - y
// Inputs x[m], y[n]; outputs function return (carry-out) and z[p]
//
//    extern uint64_t bignum_sub(uint64_t p, uint64_t *z, uint64_t m,
//                               const uint64_t *x, uint64_t n, const uint64_t *y);
//
// Does the z := x - y operation, truncating modulo p words in general and
// returning a top borrow (0 or 1) in the p'th place, only subtracting input
// words below p (as well as m and n respectively) to get the diff and borrow.
//
// Standard x86-64 ABI: RDI = p, RSI = z, RDX = m, RCX = x, R8 = n, R9 = y, returns RAX
// Microsoft x64 ABI:   RCX = p, RDX = z, R8 = m, R9 = x, [RSP+40] = n, [RSP+48] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub)
        .text

#define p %rdi
#define z %rsi
#define m %rdx
#define x %rcx
#define n %r8
#define y %r9
#define i %r10
#define a %rax

#define ashort %eax

S2N_BN_SYMBOL(bignum_sub):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
        movq    64(%rsp), %r9
#endif

// Zero the main index counter for both branches

        xorq    i, i

// First clamp the two input sizes m := min(p,m) and n := min(p,n) since
// we'll never need words past the p'th. Can now assume m <= p and n <= p.
// Then compare the modified m and n and branch accordingly

        cmpq    m, p
        cmovcq  p, m
        cmpq    n, p
        cmovcq  p, n
        cmpq    n, m
        jc      Lbignum_sub_ylonger

// The case where x is longer or of the same size (p >= m >= n)

        subq    m, p
        subq    n, m
        incq    m
        testq   n, n
        jz      Lbignum_sub_xtest
Lbignum_sub_xmainloop:
        movq    (x,i,8), a
        sbbq    (y,i,8), a
        movq    a, (z,i,8)
        incq    i
        decq    n
        jnz     Lbignum_sub_xmainloop
        jmp     Lbignum_sub_xtest
Lbignum_sub_xtoploop:
        movq    (x,i,8), a
        sbbq    $0, a
        movq    a, (z,i,8)
        incq    i
Lbignum_sub_xtest:
        decq    m
        jnz     Lbignum_sub_xtoploop
        sbbq    a, a
        testq   p, p
        jz      Lbignum_sub_tailskip
Lbignum_sub_tailloop:
        movq    a, (z,i,8)
        incq    i
        decq    p
        jnz     Lbignum_sub_tailloop
Lbignum_sub_tailskip:
        negq    a
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

// The case where y is longer (p >= n > m)

Lbignum_sub_ylonger:
        subq    n, p
        subq    m, n
        testq   m, m
        jz      Lbignum_sub_ytoploop
Lbignum_sub_ymainloop:
        movq    (x,i,8), a
        sbbq    (y,i,8), a
        movq    a, (z,i,8)
        incq    i
        decq    m
        jnz     Lbignum_sub_ymainloop
Lbignum_sub_ytoploop:
        movl    $0, ashort
        sbbq    (y,i,8), a
        movq    a, (z,i,8)
        incq    i
        decq    n
        jnz     Lbignum_sub_ytoploop

        sbbq    a, a
        testq   p, p
        jnz     Lbignum_sub_tailloop
        negq    a
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sub)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
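A C model of the clamped, truncating subtraction described above (a sketch with a hypothetical name; note how the borrow mask propagates into any z words above max(m,n), matching the tail loop that stores the sign extension):

#include <stdint.h>

/* Reference model: z := x - y truncated to p words, reading only the low
   min(p,m) and min(p,n) input words; returns the top borrow (0 or 1). */
uint64_t ref_sub(uint64_t p, uint64_t *z, uint64_t m, const uint64_t *x,
                 uint64_t n, const uint64_t *y) {
    if (m > p) m = p;                   /* clamp sizes as the assembly does */
    if (n > p) n = p;
    uint64_t b = 0;                     /* running borrow */
    for (uint64_t i = 0; i < p; i++) {
        uint64_t xi = (i < m) ? x[i] : 0;
        uint64_t yi = (i < n) ? y[i] : 0;
        unsigned __int128 t = (unsigned __int128)xi - yi - b;
        z[i] = (uint64_t)t;
        b = (uint64_t)(t >> 64) & 1;    /* 1 exactly when it wrapped */
    }
    return b;
}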
repo: wlsfx/bnbb | size: 3,133 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_optsubadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Optionally subtract or add, z := x + sgn(p) * y interpreting p as signed
// Inputs x[k], p, y[k]; outputs function return (carry-out) and z[k]
//
//    extern uint64_t bignum_optsubadd(uint64_t k, uint64_t *z, const uint64_t *x,
//                                     uint64_t p, const uint64_t *y);
//
// If p has top bit set (i.e. is negative as a signed int) return z := x - y
// Else if p is nonzero (i.e. is positive as a signed int) return z := x + y
// Otherwise (i.e. p is zero) return z := x
//
// Return in RAX = the top carry, which will be 0 or 1, and appropriate for
// addition or subtraction respectively (and always zero for p = 0)
//
// 2^{64*k} * -carryout + z = x - y [for subtraction]
// 2^{64*k} * carryout + z = x + y [for addition]
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = p, R8 = y, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = x, R9 = p, [RSP+40] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optsubadd)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optsubadd)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optsubadd)
        .text

#define k %rdi
#define z %rsi
#define x %rdx
#define p %rcx
#define y %r8

#define c %rax
#define i %r9
#define m %rcx
#define q %r10
#define a %r11

S2N_BN_SYMBOL(bignum_optsubadd):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

// Initialize top carry to zero in all cases (also return value)

        xorq    c, c

// If k = 0 do nothing

        testq   k, k
        jz      Lbignum_optsubadd_end

// Turn the input p into two bitmasks, m indicating to use the y input at
// all (same register as p) and q indicating a sign-flip

        movq    p, q
        sarq    $63, q
        negq    p
        sbbq    m, m

// Generate an initial carry-in for the negating case only to add 1; this
// is because we are actually going to do complements of the words of y

        movq    q, c

// Now go round the loop for i=0...k-1, saving the carry in c each iteration

        xorq    i, i
Lbignum_optsubadd_loop:
        movq    (y,i,8), a
        xorq    q, a
        andq    m, a
        negq    c
        adcq    (x,i,8), a
        sbbq    c, c
        movq    a, (z,i,8)
        incq    i
        cmpq    k, i
        jc      Lbignum_optsubadd_loop

// Return carry flag, fixing up inversion for negative case

        xorq    q, %rax
        negq    %rax

Lbignum_optsubadd_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_optsubadd)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
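The three cases of the contract, spelled out in plain C (a sketch with a hypothetical name; the assembly does all three with one masked, complemented addition rather than branches, which is what keeps it constant-time):

#include <stdint.h>

/* Reference model: z := x + sgn(p) * y with p read as signed; returns the
   top borrow for subtraction, the top carry for addition, 0 for p = 0. */
uint64_t ref_optsubadd(uint64_t k, uint64_t *z, const uint64_t *x,
                       uint64_t p, const uint64_t *y) {
    uint64_t out = 0;
    if ((int64_t)p < 0) {                      /* subtract, return borrow */
        uint64_t b = 0;
        for (uint64_t i = 0; i < k; i++) {
            unsigned __int128 t = (unsigned __int128)x[i] - y[i] - b;
            z[i] = (uint64_t)t;
            b = (uint64_t)(t >> 64) & 1;
        }
        out = b;
    } else if (p != 0) {                       /* add, return carry */
        uint64_t c = 0;
        for (uint64_t i = 0; i < k; i++) {
            unsigned __int128 t = (unsigned __int128)x[i] + y[i] + c;
            z[i] = (uint64_t)t;
            c = (uint64_t)(t >> 64);
        }
        out = c;
    } else {                                   /* p = 0: plain copy */
        for (uint64_t i = 0; i < k; i++) z[i] = x[i];
    }
    return out;
}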
repo: wlsfx/bnbb | size: 8,174 bytes | path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_cdiv.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Divide by a single (nonzero) word, z := x / m and return x mod m
// Inputs x[n], m; outputs function return (remainder) and z[k]
//
//    extern uint64_t bignum_cdiv(uint64_t k, uint64_t *z, uint64_t n,
//                                const uint64_t *x, uint64_t m);
//
// Does the "z := x / m" operation where x is n digits, result z is k.
// Truncates the quotient in general, but always (for nonzero m) returns
// the true remainder x mod m.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x, R8 = m
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = n, R9 = x, [RSP+40] = m
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cdiv)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cdiv)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cdiv)
        .text

#define k %rdi
#define z %rsi
#define m %r8

// These parameters get moved because of special uses for %rcx, %rdx

#define n %r9
#define x %r10

// This needs to be in %rcx for variable shifts with %cl

#define e %rcx

// Other variables

#define w %r11
#define d %r12
#define i %rbx
#define c %r13
#define l %r14

#define a %rax
#define h %rdx

#define ashort %eax
#define ishort %ebx
#define hshort %edx

// The remainder

#define r %r15

S2N_BN_SYMBOL(bignum_cdiv):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

        CFI_PUSH(%rbx)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)
        CFI_PUSH(%r15)

// Move parameters that need a new home

        movq    %rdx, n
        movq    %rcx, x

// First do a modulus computation, slightly tweaked from bignum_cmod,
// changing variables and avoiding modification of the size parameter.
// Initialize l = 0 now for convenience (we eventually need to do it).
// If the bignum is zero-length, l is already the right answer of 0

        xorq    l, l
        testq   n, n
        jz      Lbignum_cdiv_nomodulus

        bsrq    m, e
        xorq    $63, e
        shlq    %cl, m

        movq    m, r
        movq    $0x1FFFFFFFFFFFF, w
        shrq    $16, r
        xorq    r, w
        incq    r
        shrq    $32, w
        movq    r, h
        imulq   w, h
        negq    h
        movq    h, a
        shrq    $49, a
        imulq   a, a
        shrq    $34, h
        addq    a, h
        orq     $0x40000000, a
        imulq   h, a
        shrq    $30, a
        imulq   w, a
        shlq    $30, w
        addq    a, w
        shrq    $30, w
        movq    r, h
        imulq   w, h
        negq    h
        shrq    $24, h
        imulq   w, h
        shlq    $16, w
        shrq    $24, h
        addq    h, w
        movq    r, h
        imulq   w, h
        negq    h
        shrq    $32, h
        imulq   w, h
        shlq    $31, w
        shrq    $17, h
        addq    h, w
        movq    m, a
        mulq    w
        shrdq   $60, h, a
        movq    w, h
        shrq    $33, h
        notq    a
        imulq   h, a
        shlq    $1, w
        shrq    $33, a
        addq    a, w
        addq    $1, w
        movq    m, a
        sbbq    $0, w
        mulq    w
        addq    m, h
        sbbq    $0, w

        movq    m, r
        imulq   w, r
        negq    r

        xorl    hshort, hshort
        movq    n, i
Lbignum_cdiv_modloop:
        movq    h, a
        mulq    r
        addq    -8(x,i,8), a
        adcq    l, h
        movq    a, l
        sbbq    a, a
        andq    r, a
        addq    a, l
        adcq    $0, h
        decq    i
        jnz     Lbignum_cdiv_modloop

        movq    h, i

        movq    w, a
        mulq    h
        addq    i, h
        sbbq    r, r
        andq    m, r

        movq    h, a
        mulq    m
        addq    r, h
        xorq    r, r
        subq    a, l
        sbbq    h, i

        cmovnzq m, r
        xorl    ashort, ashort
        subq    r, l
        sbbq    a, i

        cmovnzq m, a
        subq    a, l

        movq    w, a
        mulq    l
        addq    l, h
        rcr     $1, h

        shrq    %cl, m
        xorq    $63, e
        shrq    %cl, h

        imulq   m, h
        subq    h, l

        movq    l, r
        subq    m, l
Lbignum_cdiv_nomodulus:
        cmovncq l, r

// If k = 0 then there's no more to be done

        testq   k, k
        jz      Lbignum_cdiv_end

// Let e be the number of trailing zeros in m (we can ignore m = 0)

        bsfq    m, e

// Now just shift m right by e bits. So hereafter we can assume m is odd
// but we first need to shift the input right by e bits then divide by m.

        shrq    %cl, m

// Compute the negated modular inverse w with w * m + 1 == 0 (mod 2^64)
// This is essentially the same as word_negmodinv.

        movq    m, a
        movq    m, w
        shlq    $2, a
        subq    a, w
        xorq    $2, w
        movq    w, a
        imulq   m, a
        movl    $2, hshort
        addq    a, h
        addq    $1, a
        imulq   h, w
        imulq   a, a
        movl    $1, hshort
        addq    a, h
        imulq   h, w
        imulq   a, a
        movl    $1, hshort
        addq    a, h
        imulq   h, w
        imulq   a, a
        movl    $1, hshort
        addq    a, h
        imulq   h, w

// We have the remainder r, so now x = m * y + r for some quotient y
// to be computed. Consider x' = x + (m - r) = m * (y + 1) and do a
// Montgomery reduction, keeping the cofactor z. This gives us
// x' + m * z = 2^{64k} * c where c <= m. Thus since x' = m * (y + 1)
// we have
//
//     m * (y + z + 1) = 2^{64k} * c
//
// This means m * (y + z + 1) == 0 (mod 2^{64k}), even when we truncate
// x to k digits (if in fact k < n). Since m is odd, it's coprime to
// 2^{64k} so we can cancel and get y + z + 1 == 0 (mod 2^{64k}), and
// hence using logical complement y == ~z (mod 2^{64k}). Thus we can
// write back the logical complements of the cofactor as the answer.

// Start with carry word c = m - r/2^e to make the initial tweak
// x' = x + (m - r); since we've shifted everything initially by e
// we need to shift the remainder too before subtracting from the
// shifted m.

        movq    r, d
        shrq    %cl, d
        movq    m, c
        subq    d, c
        xorl    ishort, ishort

// Unless n = 0, preload the zeroth digit and bump up the x pointer by
// 8 and n down by 1, to ease indexing and comparison using the same
// variable i in the main loop. When n = 0 we leave it alone, as the
// comparison i < n will always fail and the x pointer is unused.

        xorq    d, d
        testq   n, n
        jz      Lbignum_cdiv_loop
        movq    (x), d
        addq    $8, x
        decq    n

Lbignum_cdiv_loop:

// Load the next digit up to get [l,d] then shift right e places

        xorq    l, l
        cmpq    n, i
        jnc     Lbignum_cdiv_noload
        movq    (x,i,8), l
Lbignum_cdiv_noload:
        shrdq   %cl, l, d
        addq    c, d
        sbbq    c, c
        negq    c

// Now the effective sum is [c,a] where the carry-in has been absorbed.
// Do the main Montgomery step with the (odd) m, writing back ~q. Finally
// set d to the next digit ready for the following iteration.

        movq    w, a
        imulq   d, a
        notq    a
        movq    a, (z,i,8)
        notq    a
        mulq    m
        addq    d, a
        adcq    h, c

        movq    l, d

        incq    i
        cmpq    k, i
        jc      Lbignum_cdiv_loop

// Return the modulus

Lbignum_cdiv_end:
        movq    r, %rax
        CFI_POP(%r15)
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbx)
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cdiv)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
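For reference, the contract documented above can be modeled in short portable C. This is only an editorial sketch under stated assumptions (a compiler with __uint128_t, nonzero m); the name ref_cdiv is invented here, and unlike the assembly it uses hardware division and makes no constant-time claims.

#include <stdint.h>

// Reference model of the bignum_cdiv contract: top-down schoolbook
// division of x[n] by the single word m, truncating the quotient to
// k digits and returning the true remainder x mod m.
uint64_t ref_cdiv(uint64_t k, uint64_t *z, uint64_t n,
                  const uint64_t *x, uint64_t m)
{
  uint64_t r = 0;
  for (uint64_t i = n; i > 0; i--)            // top digit downwards
  { unsigned __int128 t = ((unsigned __int128)r << 64) | x[i-1];
    uint64_t q = (uint64_t)(t / m);           // fits in 64 bits since r < m
    r = (uint64_t)(t % m);
    if (i - 1 < k) z[i-1] = q;                // keep only the low k digits
  }
  for (uint64_t i = n; i < k; i++) z[i] = 0;  // zero-pad when k > n
  return r;
}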
wlsfx/bnbb
3,433
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_cmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply by a single word, z := c * y
// Inputs c, y[n]; outputs function return (carry-out) and z[k]
//
//    extern uint64_t bignum_cmul(uint64_t k, uint64_t *z, uint64_t c, uint64_t n,
//                                const uint64_t *y);
//
// Does the "z := c * y" operation where y is n digits, result z is p.
// Truncates the result in general unless p >= n + 1.
//
// The return value is a high/carry word that is meaningful when p >= n as
// giving the high part of the result. Since this is always zero if p > n,
// it is mainly of interest in the special case p = n, i.e. where the source
// and destination have the same nominal size, when it gives the extra word
// of the full result.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = c, RCX = n, R8 = y, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = c, R9 = n, [RSP+40] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul)
        .text

#define p %rdi
#define z %rsi
#define c %r9
#define n %rcx
#define x %r8

#define i %r10
#define h %r11

S2N_BN_SYMBOL(bignum_cmul):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

// First clamp the input size n := min(p,n) since we can never need to read
// past the p'th term of the input to generate p-digit output. Now we can
// assume that n <= p

        cmpq    n, p
        cmovcq  p, n

// Initialize current input/output pointer offset i and high part h.
// But then if n = 0 skip the multiplication and go to the tail part

        xorq    h, h
        xorq    i, i
        testq   n, n
        jz      Lbignum_cmul_tail

// Move c into a safer register as multiplies overwrite %rdx

        movq    %rdx, c

// Initialization of the loop: [h,l] = c * x_0

        movq    (x), %rax
        mulq    c
        movq    %rax, (z)
        movq    %rdx, h
        incq    i
        cmpq    n, i
        jz      Lbignum_cmul_tail

// Main loop doing the multiplications

Lbignum_cmul_loop:
        movq    (x,i,8), %rax
        mulq    c
        addq    h, %rax
        adcq    $0, %rdx
        movq    %rax, (z,i,8)
        movq    %rdx, h
        incq    i
        cmpq    n, i
        jc      Lbignum_cmul_loop

// Add a tail when the destination is longer

Lbignum_cmul_tail:
        cmpq    p, i
        jnc     Lbignum_cmul_end
        movq    h, (z,i,8)
        xorq    h, h
        incq    i
        cmpq    p, i
        jnc     Lbignum_cmul_end

Lbignum_cmul_tloop:
        movq    h, (z,i,8)
        incq    i
        cmpq    p, i
        jc      Lbignum_cmul_tloop

// Return the high/carry word

Lbignum_cmul_end:
        movq    h, %rax

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmul)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
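The same contract in portable C, as a hedged editorial sketch (invented name ref_cmul, assuming __uint128_t): clamp n, multiply with a running carry, then write the carry word and zeros into any longer tail, so the return is nonzero only in the p = n case described above.

#include <stdint.h>

uint64_t ref_cmul(uint64_t k, uint64_t *z, uint64_t c,
                  uint64_t n, const uint64_t *y)
{
  if (n > k) n = k;                           // clamp n := min(k,n)
  uint64_t h = 0;                             // running high/carry word
  for (uint64_t i = 0; i < n; i++)
  { unsigned __int128 t = (unsigned __int128)c * y[i] + h;
    z[i] = (uint64_t)t;
    h = (uint64_t)(t >> 64);
  }
  for (uint64_t i = n; i < k; i++)            // carry word, then zeros
  { z[i] = h; h = 0; }
  return h;
}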
wlsfx/bnbb
2,434
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_clz.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Count leading zero bits
// Input x[k]; output function return
//
//    extern uint64_t bignum_clz(uint64_t k, const uint64_t *x);
//
// In the case of a zero bignum as input the result is 64 * k
//
// In principle this has a precondition k < 2^58, but obviously that
// is always true in practice because of address space limitations
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_clz)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_clz)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_clz)
        .text

#define k %rdi
#define x %rsi
#define i %rax
#define w %rdx
#define a %rcx
#define j %r8

S2N_BN_SYMBOL(bignum_clz):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Initialize the index i and also prepare default return value of 0 (i = %rax)

        xorq    i, i

// If the bignum is zero-length, just return 0

        testq   k, k
        jz      Lbignum_clz_end

// Use w = a[i-1] to store nonzero words in a bottom-up sweep
// Set the initial default to be as if we had a 11...11 word directly below

        movq    $-1, w
        xorq    j, j
Lbignum_clz_loop:
        movq    (x,j,8), a
        incq    j
        testq   a, a
        cmovnzq j, i
        cmovnzq a, w
        cmpq    k, j
        jnz     Lbignum_clz_loop

// Now w = a[i-1] is the highest nonzero word, or in the zero case the
// default of the "extra" 11...11 = a[0-1]. We now want 64*(k - i) + clz(w) =
// 64*(k - i) + (63 - bsr(w)). Note that this code does not rely on the
// behavior of the bsr instruction for zero inputs, where it is undefined

        subq    i, k
        shlq    $6, k
        bsrq    w, %rax
        xorq    $63, %rax
        addq    k, %rax

Lbignum_clz_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_clz)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
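A straightforward C model of the same result, for orientation only (invented name ref_clz); note that the assembly deliberately makes one constant-time bottom-up pass, while this sketch scans and early-exits.

#include <stdint.h>

uint64_t ref_clz(uint64_t k, const uint64_t *x)
{
  for (uint64_t i = k; i > 0; i--)
    if (x[i-1] != 0)
    { uint64_t w = x[i-1], c = 0;
      while (!(w >> 63)) { w <<= 1; c++; }    // zeros in the top word
      return 64 * (k - i) + c;
    }
  return 64 * k;                              // zero bignum case
}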
wlsfx/bnbb
1,209
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_bytereverse.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reverse the order of bytes in a 64-bit word
//
//    extern uint64_t word_bytereverse(uint64_t a);
//
// Standard x86-64 ABI: RDI = a, returns RAX
// Microsoft x64 ABI:   RCX = a, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_bytereverse)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_bytereverse)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(word_bytereverse)
        .text

// Just uses the x86 BSWAP instruction, which does the job directly

S2N_BN_SYMBOL(word_bytereverse):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
#endif

        movq    %rdi, %rax
        bswapq  %rax

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(word_bytereverse)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
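The same permutation in portable C, as an editorial sketch (invented name ref_bytereverse) for platforms without BSWAP in mind:

#include <stdint.h>

uint64_t ref_bytereverse(uint64_t a)
{
  uint64_t r = 0;
  for (int i = 0; i < 8; i++)           // lowest input byte ends up highest
    r = (r << 8) | ((a >> (8 * i)) & 0xFF);
  return r;
}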
wlsfx/bnbb
6,151
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_cmod.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Find bignum modulo a single word
// Input x[k], m; output function return
//
//    extern uint64_t bignum_cmod(uint64_t k, const uint64_t *x, uint64_t m);
//
// Returns x mod m, assuming m is nonzero.
//
// Standard x86-64 ABI: RDI = k, RSI = x, RDX = m, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = x, R8 = m, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmod)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmod)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmod)
        .text

#define k %rdi
#define x %rsi

// This has to be %rcx for variable shifts

#define e %rcx

// We share the same variable for m and n, just shifting left then right.
// And h is kept in %rdx which does work despite the special operands of mul.

#define m %r8
#define n %r8
#define w %r9
#define a %rax
#define r %r10
#define h %rdx
#define l %r11

#define ashort %eax
#define hshort %edx

S2N_BN_SYMBOL(bignum_cmod):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

// Initialize l = 0 now for convenience (we eventually need to do it).
// If the bignum is zero-length, l is already the right answer of 0

        xorq    l, l
        testq   k, k
        jz      Lbignum_cmod_end

// Move m into its permanent home (also used for n).
// Find number of leading zeros of m and let n = 2^e m so that for an
// in-scope (nonzero) input m we have n >= 2^63, e <= 63.

        movq    %rdx, m
        bsrq    m, e
        xorq    $63, e
        shlq    %cl, m

// A near-clone of word_recip so 2^64 + w = ceil(2^128 / n) - 1

        movq    n, r
        movq    $0x1FFFFFFFFFFFF, w
        shrq    $16, r
        xorq    r, w
        incq    r
        shrq    $32, w
        movq    r, h
        imulq   w, h
        negq    h
        movq    h, a
        shrq    $49, a
        imulq   a, a
        shrq    $34, h
        addq    a, h
        orq     $0x40000000, a
        imulq   h, a
        shrq    $30, a
        imulq   w, a
        shlq    $30, w
        addq    a, w
        shrq    $30, w
        movq    r, h
        imulq   w, h
        negq    h
        shrq    $24, h
        imulq   w, h
        shlq    $16, w
        shrq    $24, h
        addq    h, w
        movq    r, h
        imulq   w, h
        negq    h
        shrq    $32, h
        imulq   w, h
        shlq    $31, w
        shrq    $17, h
        addq    h, w
        movq    n, a
        mulq    w
        shrdq   $60, h, a
        movq    w, h
        shrq    $33, h
        notq    a
        imulq   h, a
        shlq    $1, w
        shrq    $33, a
        addq    a, w
        addq    $1, w
        movq    n, a
        sbbq    $0, w
        mulq    w
        addq    n, h
        sbbq    $0, w

// Take the residue r = 2^128 - (2^64 + w) * n, which by the above bound
// we know fits in 64 bits. We know 2^128 == r (mod n) and hence (mod m).

        movq    n, r
        imulq   w, r
        negq    r

// Now just go down through the digits accumulating [h;l] == x (mod n)
// by 2^64 * [h;l] + d = 2^128 * h + [l;d] == r * h + [l; d]. That addition
// may overflow with a carry, say 2^128 + [h';l'] = r * h + [l; d], in
// which case we subtract 2^128 - r (which is divisible by m and keeping
// things in 128 bits we just add r). Thus the overall bound when we initially
// overflow is r * h + [l; d] - (2^128 - r) = r * (h + 1) + [l; d] - 2^128
// < 2^128 so we stay inside 2 words

        xorl    hshort, hshort
Lbignum_cmod_loop:
        movq    h, a
        mulq    r
        addq    -8(x,k,8), a
        adcq    l, h
        movq    a, l
        sbbq    a, a
        andq    r, a
        addq    a, l
        adcq    $0, h
        decq    k
        jnz     Lbignum_cmod_loop

// Now do reciprocal multiplication to reduce the 2-word modular equivalent
// [h;l] to the single word l. If we assume the truncations are as follows
// 2^64 + w = 2^128 / n - epsilon (0 <= epsilon <= 1)
// q = (w * h / 2^64) - delta (0 <= delta <= 1)
// the net remainder is l + (h/2^64 * epsilon + delta) * n < l + 2 * n.
// In general this needs two rounds of comparison to guarantee getting
// into a single word (though one more mul could be used instead).
// Also, the quotient estimate can overflow so we use r as extra addend
// 2^64 * n when the initial addition overflows. The overall multiple
// of n can't itself overflow, since we know it's an underestimate of
// the initial residue.

        movq    h, k    // back up h for muls

        movq    w, a
        mulq    h
        addq    k, h
        sbbq    r, r
        andq    n, r    // So q = (r;h)

        movq    h, a
        mulq    n
        addq    r, h
        xorq    r, r
        subq    a, l
        sbbq    h, k    // (k,l) = first reduction

        cmovnzq n, r
        xorl    ashort, ashort
        subq    r, l
        sbbq    a, k

        cmovnzq n, a
        subq    a, l

// One more reciprocal multiplication to do a modular reduction, but now in
// one word and in terms of the original m. For the quotient estimate we want
// q = ((2^64 + w) * l) / 2^{128-e} = ((2^64 + w) * l) / 2^65 / 2^{63-e}.

        movq    w, a
        mulq    l
        addq    l, h
        rcr     $1, h

        shrq    %cl, m
        xorq    $63, e
        shrq    %cl, h

        imulq   m, h
        subq    h, l

// Note that since there is no neglected "low" part of the single word,
// one round of correction suffices; in the analog of the above l = 0
// and hence the residue so far is already < 2 * m.

        movq    l, a
        subq    m, l
Lbignum_cmod_end:
        cmovncq l, a

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmod)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
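The value computed above can be cross-checked against a division-based C model (invented name ref_cmod, assuming __uint128_t and m != 0). The point of the assembly is precisely to avoid this hardware division, using the precomputed reciprocal w instead and staying constant-time:

#include <stdint.h>

uint64_t ref_cmod(uint64_t k, const uint64_t *x, uint64_t m)
{
  uint64_t r = 0;
  for (uint64_t i = k; i > 0; i--)      // top-down schoolbook reduction
  { unsigned __int128 t = ((unsigned __int128)r << 64) | x[i-1];
    r = (uint64_t)(t % m);
  }
  return r;
}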
wlsfx/bnbb
2,158
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_popcount.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Count number of set bits in a single 64-bit word (population count)
// Input a; output function return
//
//    extern uint64_t word_popcount(uint64_t a);
//
// Standard x86-64 ABI: RDI = a, returns RAX
// Microsoft x64 ABI:   RCX = a, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_popcount)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_popcount)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(word_popcount)
        .text

S2N_BN_SYMBOL(word_popcount):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
#endif

// The code is generated by gcc -O3 (version 11.4.0) from
//
// uint64_t word_popcount(uint64_t x)
// { uint64_t x2 = x - ((x & UINT64_C(0xAAAAAAAAAAAAAAAA))>>1);
//   uint64_t x4 = (x2 & UINT64_C(0x3333333333333333)) +
//                 ((x2 & UINT64_C(0xCCCCCCCCCCCCCCCC))>>2);
//   uint64_t x8 = (x4 + (x4>>4)) & UINT64_C(0x0F0F0F0F0F0F0F0F);
//   uint64_t x64 = x8 * UINT64_C(0x101010101010101);
//   uint64_t y = x64>>56;
//   return y;
// }

        movabsq $0x5555555555555555, %rdx
        movq    %rdi, %rax
        shrq    $1, %rax
        andq    %rdx, %rax
        subq    %rax, %rdi
        movabsq $0x3333333333333333, %rax
        movq    %rdi, %rdx
        andq    %rax, %rdi
        shrq    $0x2, %rdx
        andq    %rax, %rdx
        addq    %rdi, %rdx
        movq    %rdx, %rax
        shrq    $0x4, %rax
        addq    %rdx, %rax
        movabsq $0xf0f0f0f0f0f0f0f, %rdx
        andq    %rdx, %rax
        movabsq $0x101010101010101, %rdx
        imulq   %rdx, %rax
        shrq    $0x38, %rax

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(word_popcount)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
wlsfx/bnbb
6,401
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_montredc.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery reduce, z := (x' / 2^{64p}) MOD m
// Inputs x[n], m[k], p; output z[k]
//
//    extern void bignum_montredc(uint64_t k, uint64_t *z, uint64_t n,
//                                const uint64_t *x, const uint64_t *m, uint64_t p);
//
// Does a := (x' / 2^{64p}) mod m where x' = x if n <= p + k and in general
// is the lowest (p+k) digits of x, assuming x' <= 2^{64p} * m. That is,
// p-fold Montgomery reduction w.r.t. a k-digit modulus m giving a k-digit
// answer.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x, R8 = m, R9 = p
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = n, R9 = x, [RSP+40] = m, [RSP+48] = p
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montredc)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montredc)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montredc)
        .text

// We copy n into %r10 but it comes in in %rdx originally

#define k %rdi
#define z %rsi
#define n %r10
#define x %rcx
#define m %r8
#define p %r9

// General temp, low part of product and mul input

#define a %rax

// General temp, High part of product

#define b %rdx

// Negated modular inverse

#define w (%rsp)

// Inner loop counter

#define j %rbx

// Home for i'th digit or Montgomery multiplier

#define d %rbp

#define h %r11
#define e %r12
#define t %r13
#define i %r14
#define c %r15

// Some more intuitive names for temp regs in initial word-level negmodinv.

#define t1 %rbx
#define t2 %r14

#define ashort %eax
#define cshort %r15d
#define jshort %ebx

S2N_BN_SYMBOL(bignum_montredc):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
        movq    64(%rsp), %r9
#endif

// Save registers and allocate space on stack for non-register variable w

        CFI_PUSH(%rbx)
        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)
        CFI_PUSH(%r15)
        CFI_DEC_RSP(8)

// If k = 0 the whole operation is trivial

        testq   k, k
        jz      Lbignum_montredc_end

// Move n input into its permanent home, since we need %rdx for multiplications

        movq    %rdx, n

// Compute word-level negated modular inverse w for m == m[0].

        movq    (m), a
        movq    a, t2
        movq    a, t1
        shlq    $2, t2
        subq    t2, t1
        xorq    $2, t1
        movq    t1, t2
        imulq   a, t2
        movl    $2, ashort
        addq    t2, a
        addq    $1, t2
        imulq   a, t1
        imulq   t2, t2
        movl    $1, ashort
        addq    t2, a
        imulq   a, t1
        imulq   t2, t2
        movl    $1, ashort
        addq    t2, a
        imulq   a, t1
        imulq   t2, t2
        movl    $1, ashort
        addq    t2, a
        imulq   a, t1
        movq    t1, w

// Initialize z to the lowest k digits of the input, zero-padding if n < k.

        movq    k, j
        cmpq    k, n
        cmovcq  n, j
        xorq    i, i
        testq   j, j
        jz      Lbignum_montredc_padloop
Lbignum_montredc_copyloop:
        movq    (x,i,8), a
        movq    a, (z,i,8)
        incq    i
        cmpq    j, i
        jc      Lbignum_montredc_copyloop
        cmpq    k, i
        jnc     Lbignum_montredc_initialized
        xorq    j, j
Lbignum_montredc_padloop:
        movq    j, (z,i,8)
        incq    i
        cmpq    k, i
        jc      Lbignum_montredc_padloop
Lbignum_montredc_initialized:
        xorq    c, c

// Now if p = 0 we just need the corrective tail, and even that is
// only needed for the case when the input is exactly the modulus,
// to maintain the <= 2^64p * n precondition

        testq   p, p
        jz      Lbignum_montredc_corrective

// Outer loop, just doing a standard Montgomery reduction on z

        xorq    i, i
Lbignum_montredc_outerloop:
        movq    (z), e
        movq    w, d
        imulq   e, d
        movq    (m), a
        mulq    d
        addq    e, a    // Will be zero but want the carry
        movq    %rdx, h
        movl    $1, jshort
        movq    k, t
        decq    t
        jz      Lbignum_montredc_montend

Lbignum_montredc_montloop:
        adcq    (z,j,8), h
        sbbq    e, e
        movq    (m,j,8), a
        mulq    d
        subq    e, %rdx
        addq    h, a
        movq    a, -8(z,j,8)
        movq    %rdx, h
        incq    j
        decq    t
        jnz     Lbignum_montredc_montloop

Lbignum_montredc_montend:
        adcq    c, h
        movl    $0, cshort
        adcq    $0, c
        addq    i, j
        cmpq    n, j
        jnc     Lbignum_montredc_offtheend
        movq    (x,j,8), a
        addq    a, h
        adcq    $0, c
Lbignum_montredc_offtheend:
        movq    h, -8(z,k,8)

// End of outer loop.

        incq    i
        cmpq    p, i
        jc      Lbignum_montredc_outerloop

// Now do a comparison of (c::z) with (0::m) to set a final correction mask
// indicating that (c::z) >= m and so we need to subtract m.

Lbignum_montredc_corrective:

        xorq    j, j
        movq    k, n
Lbignum_montredc_cmploop:
        movq    (z,j,8), a
        sbbq    (m,j,8), a
        incq    j
        decq    n
        jnz     Lbignum_montredc_cmploop

        sbbq    $0, c
        sbbq    d, d
        notq    d

// Now do a masked subtraction of m for the final reduced result.

        xorq    e, e
        xorq    j, j
Lbignum_montredc_corrloop:
        movq    (m,j,8), a
        andq    d, a
        negq    e
        sbbq    a, (z,j,8)
        sbbq    e, e
        incq    j
        cmpq    k, j
        jc      Lbignum_montredc_corrloop

Lbignum_montredc_end:
        CFI_INC_RSP(8)
        CFI_POP(%r15)
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbp)
        CFI_POP(%rbx)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montredc)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
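The word-level negated modular inverse block above (repeated in the sibling Montgomery files) is the one nonobvious precomputation; in portable C it is a short Hensel/Newton iteration, sketched here with the invented name ref_negmodinv. Each round squares the error term, so 5 correct low bits grow to 10, 20, 40 and then 80 >= 64:

#include <stdint.h>

uint64_t ref_negmodinv(uint64_t a)      // a assumed odd
{
  uint64_t w = (a - (a << 2)) ^ 2;      // w * a + 1 == 0 (mod 2^5)
  uint64_t e = a * w + 1;               // current error term
  for (int i = 0; i < 4; i++)
  { w = w * (e + 1);                    // new error = old error squared
    e = e * e;
  }
  return w;                             // w * a + 1 == 0 (mod 2^64)
}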
wlsfx/bnbb
1,311
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_ctz.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Count trailing zero bits in a single word
// Input a; output function return
//
//    extern uint64_t word_ctz(uint64_t a);
//
// Standard x86-64 ABI: RDI = a, returns RAX
// Microsoft x64 ABI:   RCX = a, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_ctz)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_ctz)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(word_ctz)
        .text

S2N_BN_SYMBOL(word_ctz):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
#endif

// First just do %rax = bsf(a), which is right except (maybe) for zero inputs

        bsfq    %rdi, %rax

// Force return of 64 in the zero-input case

        movl    $64, %edx
        testq   %rdi, %rdi
        cmoveq  %rdx, %rax

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(word_ctz)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
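A C model of the same function including the forced zero-input case (invented name ref_ctz):

#include <stdint.h>

uint64_t ref_ctz(uint64_t a)
{
  if (a == 0) return 64;          // BSF leaves this case undefined
  uint64_t c = 0;
  while (!(a & 1)) { a >>= 1; c++; }
  return c;
}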
wlsfx/bnbb
4,904
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_demont.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert from (almost-)Montgomery form z := (x / 2^{64k}) mod m
// Inputs x[k], m[k]; output z[k]
//
//    extern void bignum_demont(uint64_t k, uint64_t *z, const uint64_t *x,
//                              const uint64_t *m);
//
// Does z := (x / 2^{64k}) mod m, hence mapping out of Montgomery domain.
// In other words, this is a k-fold Montgomery reduction with same-size input.
// This can handle almost-Montgomery inputs, i.e. any k-digit bignum.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = m
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = x, R9 = m
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont)
        .text

#define k %rdi
#define z %rsi
#define x %rdx
#define m %rcx

// General temp, low part of product and mul input

#define a %rax

// General temp, high part of product (no longer x)

#define b %rdx

// Negated modular inverse

#define w %r8

// Outer loop counter

#define i %r9

// Inner loop counter

#define j %rbx

// Home for Montgomery multiplier

#define d %rbp

#define h %r10
#define e %r11
#define n %r12

// A temp reg in the initial word-level negmodinv, same as j

#define t %rbx

#define ashort %eax
#define jshort %ebx

S2N_BN_SYMBOL(bignum_demont):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
#endif

// Save registers

        CFI_PUSH(%rbx)
        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)

// If k = 0 the whole operation is trivial

        testq   k, k
        jz      Lbignum_demont_end

// Compute word-level negated modular inverse w for m == m[0].

        movq    (m), a
        movq    a, t
        movq    a, w
        shlq    $2, t
        subq    t, w
        xorq    $2, w
        movq    w, t
        imulq   a, t
        movl    $2, ashort
        addq    t, a
        addq    $1, t
        imulq   a, w
        imulq   t, t
        movl    $1, ashort
        addq    t, a
        imulq   a, w
        imulq   t, t
        movl    $1, ashort
        addq    t, a
        imulq   a, w
        imulq   t, t
        movl    $1, ashort
        addq    t, a
        imulq   a, w

// Initially just copy the input to the output. It would be a little more
// efficient but somewhat fiddlier to tweak the zeroth iteration below instead.
// After this we never use x again and can safely recycle RDX for muls

        xorq    j, j
Lbignum_demont_iloop:
        movq    (x,j,8), a
        movq    a, (z,j,8)
        incq    j
        cmpq    k, j
        jc      Lbignum_demont_iloop

// Outer loop, just doing a standard Montgomery reduction on z

        xorq    i, i
Lbignum_demont_outerloop:
        movq    (z), e
        movq    w, d
        imulq   e, d
        movq    (m), a
        mulq    d
        addq    e, a    // Will be zero but want the carry
        movq    %rdx, h
        movl    $1, jshort
        movq    k, n
        decq    n
        jz      Lbignum_demont_montend

Lbignum_demont_montloop:
        adcq    (z,j,8), h
        sbbq    e, e
        movq    (m,j,8), a
        mulq    d
        subq    e, %rdx
        addq    h, a
        movq    a, -8(z,j,8)
        movq    %rdx, h
        incq    j
        decq    n
        jnz     Lbignum_demont_montloop

Lbignum_demont_montend:
        adcq    $0, h
        movq    h, -8(z,j,8)

// End of outer loop.

        incq    i
        cmpq    k, i
        jc      Lbignum_demont_outerloop

// Now do a comparison of z with m to set a final correction mask
// indicating that z >= m and so we need to subtract m.

        xorq    j, j
        movq    k, n
Lbignum_demont_cmploop:
        movq    (z,j,8), a
        sbbq    (m,j,8), a
        incq    j
        decq    n
        jnz     Lbignum_demont_cmploop

        sbbq    d, d
        notq    d

// Now do a masked subtraction of m for the final reduced result.

        xorq    e, e
        xorq    j, j
Lbignum_demont_corrloop:
        movq    (m,j,8), a
        andq    d, a
        negq    e
        sbbq    a, (z,j,8)
        sbbq    e, e
        incq    j
        cmpq    k, j
        jc      Lbignum_demont_corrloop

Lbignum_demont_end:
        CFI_POP(%r12)
        CFI_POP(%rbp)
        CFI_POP(%rbx)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_demont)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
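The k-fold reduction above can be sketched in C using ref_negmodinv from the earlier note (again an editorial model, not the s2n-bignum API, assuming an odd modulus and __uint128_t). Each outer step picks a multiplier d that makes z + d * m divisible by 2^64, then shifts down one word:

#include <stdint.h>

uint64_t ref_negmodinv(uint64_t a);     // as sketched earlier

void ref_demont(uint64_t k, uint64_t *z, const uint64_t *x,
                const uint64_t *m)
{
  uint64_t w = ref_negmodinv(m[0]);
  for (uint64_t i = 0; i < k; i++) z[i] = x[i];
  for (uint64_t i = 0; i < k; i++)
  { uint64_t d = w * z[0];              // makes z + d*m divisible by 2^64
    unsigned __int128 t = (unsigned __int128)d * m[0] + z[0];
    uint64_t h = (uint64_t)(t >> 64);   // low 64 bits are zero
    for (uint64_t j = 1; j < k; j++)
    { t = (unsigned __int128)d * m[j] + z[j] + h;
      z[j-1] = (uint64_t)t;
      h = (uint64_t)(t >> 64);
    }
    z[k-1] = h;
  }
  // the assembly finishes with the masked comparison/subtraction
  // against m shown above; that correction is omitted in this sketch
}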
wlsfx/bnbb
5,949
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_montsqr.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^{64k}) mod m
// Inputs x[k], m[k]; output z[k]
//
//    extern void bignum_montsqr(uint64_t k, uint64_t *z, const uint64_t *x,
//                               const uint64_t *m);
//
// Does z := (x^2 / 2^{64k}) mod m, assuming x^2 <= 2^{64k} * m, which is
// guaranteed in particular if x < m initially (the "intended" case).
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = m
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = x, R9 = m
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr)
        .text

// We copy x into %r9 but it comes in in %rdx originally

#define k %rdi
#define z %rsi
#define x %r9
#define m %rcx

// General temp, low part of product and mul input

#define a %rax

// General temp, High part of product

#define b %rdx

// Negated modular inverse

#define w %r8

// Inner loop counter

#define j %rbx

// Home for i'th digit or Montgomery multiplier

#define d %rbp

#define h %r10
#define e %r11
#define n %r12
#define i %r13
#define c0 %r14
#define c1 %r15

// A temp reg in the initial word-level negmodinv.

#define t2 %rdx

#define ashort %eax
#define jshort %ebx

S2N_BN_SYMBOL(bignum_montsqr):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
#endif

// Save registers

        CFI_PUSH(%rbx)
        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)
        CFI_PUSH(%r15)

// If k = 0 the whole operation is trivial

        testq   k, k
        jz      Lbignum_montsqr_end

// Move x input into its permanent home, since we need %rdx for multiplications

        movq    %rdx, x

// Compute word-level negated modular inverse w for m == m[0].

        movq    (m), a
        movq    a, t2
        movq    a, w
        shlq    $2, t2
        subq    t2, w
        xorq    $2, w
        movq    w, t2
        imulq   a, t2
        movl    $2, ashort
        addq    t2, a
        addq    $1, t2
        imulq   a, w
        imulq   t2, t2
        movl    $1, ashort
        addq    t2, a
        imulq   a, w
        imulq   t2, t2
        movl    $1, ashort
        addq    t2, a
        imulq   a, w
        imulq   t2, t2
        movl    $1, ashort
        addq    t2, a
        imulq   a, w

// Initialize the output c0::z to zero so we can then consistently add rows.
// It would be a bit more efficient to special-case the zeroth row, but
// this keeps the code slightly simpler.

        xorq    i, i    // Also initializes i for main loop
        xorq    j, j
Lbignum_montsqr_zoop:
        movq    i, (z,j,8)
        incq    j
        cmpq    k, j
        jc      Lbignum_montsqr_zoop
        xorq    c0, c0

// Outer loop pulling down digits d=x[i], multiplying by x and reducing

Lbignum_montsqr_outerloop:

// Multiply-add loop where we always have CF + previous high part h to add in.
// Note that in general we do need yet one more carry in this phase and hence
// initialize c1 with the top carry.

        movq    (x,i,8), d
        xorq    j, j
        xorq    h, h
        xorq    c1, c1
        movq    k, n

Lbignum_montsqr_maddloop:
        adcq    (z,j,8), h
        sbbq    e, e
        movq    (x,j,8), a
        mulq    d
        subq    e, %rdx
        addq    h, a
        movq    a, (z,j,8)
        movq    %rdx, h
        incq    j
        decq    n
        jnz     Lbignum_montsqr_maddloop
        adcq    h, c0
        adcq    c1, c1

// Montgomery reduction loop, similar but offsetting writebacks

        movq    (z), e
        movq    w, d
        imulq   e, d
        movq    (m), a
        mulq    d
        addq    e, a    // Will be zero but want the carry
        movq    %rdx, h
        movl    $1, jshort
        movq    k, n
        decq    n
        jz      Lbignum_montsqr_montend

Lbignum_montsqr_montloop:
        adcq    (z,j,8), h
        sbbq    e, e
        movq    (m,j,8), a
        mulq    d
        subq    e, %rdx
        addq    h, a
        movq    a, -8(z,j,8)
        movq    %rdx, h
        incq    j
        decq    n
        jnz     Lbignum_montsqr_montloop

Lbignum_montsqr_montend:
        adcq    c0, h
        adcq    $0, c1
        movq    c1, c0
        movq    h, -8(z,j,8)

// End of outer loop.

        incq    i
        cmpq    k, i
        jc      Lbignum_montsqr_outerloop

// Now do a comparison of (c0::z) with (0::m) to set a final correction mask
// indicating that (c0::z) >= m and so we need to subtract m.

        xorq    j, j
        movq    k, n
Lbignum_montsqr_cmploop:
        movq    (z,j,8), a
        sbbq    (m,j,8), a
        incq    j
        decq    n
        jnz     Lbignum_montsqr_cmploop

        sbbq    $0, c0
        sbbq    d, d
        notq    d

// Now do a masked subtraction of m for the final reduced result.

        xorq    e, e
        xorq    j, j
Lbignum_montsqr_corrloop:
        movq    (m,j,8), a
        andq    d, a
        negq    e
        sbbq    a, (z,j,8)
        sbbq    e, e
        incq    j
        cmpq    k, j
        jc      Lbignum_montsqr_corrloop

Lbignum_montsqr_end:
        CFI_POP(%r15)
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbp)
        CFI_POP(%rbx)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montsqr)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
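The closing compare-and-correct idiom shared by these Montgomery routines deserves a gloss; here is an editorial C model (invented name ref_masked_correct, assuming the pre-correction value c0::z is below 2*m, as the routines guarantee). A borrow chain stands in for the flag-carrying sbbq sequence, and the subtraction is masked rather than branched:

#include <stdint.h>

void ref_masked_correct(uint64_t k, uint64_t *z, uint64_t c0,
                        const uint64_t *m)
{
  uint64_t b = 0;                        // borrow out of z - m
  for (uint64_t j = 0; j < k; j++)
  { unsigned __int128 t = (unsigned __int128)z[j] - m[j] - b;
    b = (uint64_t)(t >> 64) & 1;
  }
  uint64_t mask = -(uint64_t)(c0 >= b);  // all-ones iff (c0::z) >= m
  b = 0;
  for (uint64_t j = 0; j < k; j++)       // masked subtraction of m
  { unsigned __int128 t = (unsigned __int128)z[j] - (m[j] & mask) - b;
    z[j] = (uint64_t)t;
    b = (uint64_t)(t >> 64) & 1;
  }
}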
wlsfx/bnbb
3,321
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_shl_small.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Shift bignum left by c < 64 bits z := x * 2^c
// Inputs x[n], c; outputs function return (carry-out) and z[k]
//
//    extern uint64_t bignum_shl_small(uint64_t k, uint64_t *z, uint64_t n,
//                                     const uint64_t *x, uint64_t c);
//
// Does the "z := x << c" operation where x is n digits, result z is p.
// The shift count c is masked to 6 bits so it actually uses c' = c mod 64.
// The return value is the "next word" of a p+1 bit result, if n <= p.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x, R8 = c, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = n, R9 = x, [RSP+40] = c, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_shl_small)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_shl_small)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_shl_small)
        .text

#define p %rdi
#define z %rsi
#define n %rdx

// These get moved from their initial positions

#define c %rcx
#define x %r9

// Other variables

#define b %rax
#define t %r8
#define a %r10
#define i %r11

S2N_BN_SYMBOL(bignum_shl_small):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

// First clamp the input size n := min(p,n) since we can never need to read
// past the p'th term of the input to generate p-digit output.

        cmpq    n, p
        cmovcq  p, n

// Initialize "previous word" carry b to zero and main index i also to zero.
// Then just skip the main loop if n = 0

        xorq    b, b
        xorq    i, i
        testq   n, n
        jz      Lbignum_shl_small_tail

// Reshuffle registers to put the shift count into CL

        movq    %rcx, x
        movq    %r8, c

// Now the main loop

Lbignum_shl_small_loop:
        movq    (x,i,8), a
        movq    a, t
        shldq   %cl, b, a
        movq    a, (z,i,8)
        movq    t, b
        incq    i
        cmpq    n, i
        jc      Lbignum_shl_small_loop

// Shift the top word correspondingly. Using shld one more time is easier
// than carefully producing a complementary shift with care over the zero case

        xorq    a, a
        shldq   %cl, b, a
        movq    a, b

// If we are at the end, finish, otherwise write carry word then zeros

Lbignum_shl_small_tail:
        cmpq    p, i
        jnc     Lbignum_shl_small_end
        movq    b, (z,i,8)
        xorq    b, b
        incq    i
        cmpq    p, i
        jnc     Lbignum_shl_small_end

Lbignum_shl_small_tloop:
        movq    b, (z,i,8)
        incq    i
        cmpq    p, i
        jc      Lbignum_shl_small_tloop

// Return, with RAX = b as the top word

Lbignum_shl_small_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_shl_small)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
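A portable C model of the contract (invented name ref_shl_small); note the explicit guard against the undefined shift by 64 that the shldq instruction handles implicitly:

#include <stdint.h>

uint64_t ref_shl_small(uint64_t k, uint64_t *z, uint64_t n,
                       const uint64_t *x, uint64_t c)
{
  c &= 63;                                   // use c mod 64 as documented
  if (n > k) n = k;                          // clamp n := min(k,n)
  uint64_t b = 0;                            // bits shifted out so far
  for (uint64_t i = 0; i < n; i++)
  { uint64_t a = x[i];
    z[i] = (a << c) | b;
    b = c ? (a >> (64 - c)) : 0;             // avoid UB shift by 64
  }
  for (uint64_t i = n; i < k; i++)           // carry word, then zeros
  { z[i] = b; b = 0; }
  return b;
}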
wlsfx/bnbb
4,094
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_cmnegadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Negated multiply-add with single-word multiplier, z := z - c * y
// Inputs c, y[n]; outputs function return (carry-out) and z[k]
//
//    extern uint64_t bignum_cmnegadd(uint64_t k, uint64_t *z, uint64_t c, uint64_t n,
//                                    const uint64_t *y);
//
// Does the "z := z - c * y" operation where y is n digits, result z is p.
// Truncates the result in general.
//
// The return value is a high/carry word that is meaningful when n <= p.
// It is interpreted negatively as z' - 2^{64k} * return = z - c * y.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = c, RCX = n, R8 = y, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = c, R9 = n, [RSP+40] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmnegadd)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmnegadd)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmnegadd)
        .text

#define p %rdi
#define z %rsi
#define c %r9
#define n %rcx
#define x %r8

#define i %r10
#define h %r11
#define r %rbx

#define hshort %r11d
#define ishort %r10d

S2N_BN_SYMBOL(bignum_cmnegadd):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

// Seems hard to avoid one more register

        CFI_PUSH(%rbx)

// First clamp the input size n := min(p,n) since we can never need to read
// past the p'th term of the input to generate p-digit output.
// Subtract p := p - min(n,p) so it holds the size of the extra tail needed

        cmpq    n, p
        cmovcq  p, n
        subq    n, p

// Initialize high part h = 0; if n = 0 do nothing but return that zero

        xorq    h, h
        testq   n, n
        jz      Lbignum_cmnegadd_end

// Move c into a safer register as multiplies overwrite %rdx

        movq    %rdx, c

// Initialization of the loop: 2^64 * CF + [h,z_0'] = z_0 + c * ~x_0 + c

        movq    (x), %rax
        notq    %rax
        mulq    c
        addq    c, %rax
        adcq    $0, %rdx
        addq    %rax, (z)
        movq    %rdx, h
        movl    $1, ishort
        decq    n
        jz      Lbignum_cmnegadd_tail

// Main loop, where we always have CF + previous high part h to add in

Lbignum_cmnegadd_loop:
        adcq    (z,i,8), h
        sbbq    r, r
        movq    (x,i,8), %rax
        notq    %rax
        mulq    c
        subq    r, %rdx
        addq    h, %rax
        movq    %rax, (z,i,8)
        movq    %rdx, h
        incq    i
        decq    n
        jnz     Lbignum_cmnegadd_loop

// At this point we have 2^{64n} * (h + CF) + z' = z + c * (2^{64n} - x)
// so z' - 2^{64n} * (c - (h + CF)) = z - c * x.
// Since z - c * x < 2^{64n} we must have c - (h + CF) >= 0.
// Accumulate the negative carry in h for consistency with trivial cases.

Lbignum_cmnegadd_tail:
        sbbq    h, c
        movq    c, h

// Propagate the carry all the way to the end with h as extra carry word

        testq   p, p
        jz      Lbignum_cmnegadd_end
        subq    h, (z,i,8)
        movl    $0, hshort
        incq    i
        decq    p
        jz      Lbignum_cmnegadd_highend

Lbignum_cmnegadd_tloop:
        sbbq    h, (z,i,8)
        incq    i
        decq    p
        jnz     Lbignum_cmnegadd_tloop

Lbignum_cmnegadd_highend:

// Adjust the high word with the carry from subtraction

        adcq    $0, h

// Return the high/carry word

Lbignum_cmnegadd_end:
        movq    h, %rax

        CFI_POP(%rbx)
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmnegadd)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
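An editorial C model of the stated return convention (invented name ref_cmnegadd, assuming __uint128_t): subtract the product word by word, then fold the product's top word and the final borrow into the negatively-interpreted return, so z' - 2^{64k} * return = z - c * y over the clamped digits:

#include <stdint.h>

uint64_t ref_cmnegadd(uint64_t k, uint64_t *z, uint64_t c,
                      uint64_t n, const uint64_t *y)
{
  if (n > k) n = k;                          // clamp n := min(k,n)
  uint64_t h = 0, b = 0;                     // product carry, borrow
  for (uint64_t i = 0; i < k; i++)
  { unsigned __int128 t = (i < n)
      ? (unsigned __int128)c * y[i] + h : h; // next product word
    uint64_t lo = (uint64_t)t;
    h = (uint64_t)(t >> 64);
    unsigned __int128 s = (unsigned __int128)z[i] - lo - b;
    z[i] = (uint64_t)s;
    b = (uint64_t)(s >> 64) & 1;
  }
  return h + b;                              // negative carry-out word
}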
wlsfx/bnbb
3,597
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_normalize.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Normalize bignum in-place by shifting left till top bit is 1
// Input z[k]; outputs function return (bits shifted left) and z[k]
//
//    extern uint64_t bignum_normalize(uint64_t k, uint64_t *z);
//
// Given a k-digit bignum z, this function shifts it left by its number of
// leading zero bits, to give result with top bit 1, unless the input number
// was 0. The return is the same as the output of bignum_clz, i.e. the number
// of bits shifted (nominally 64 * k in the case of zero input).
//
// Standard x86-64 ABI: RDI = k, RSI = z, returns RAX
// Microsoft x64 ABI:   RCX = k, RDX = z, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_normalize)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_normalize)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_normalize)
        .text

#define k %rdi
#define z %rsi

// Return value, which we put in %rax to save a move or two

#define r %rax

// Other variables
// Matters that c is RCX as CL=lo(c) is assumed in shifts

#define b %r9
#define c %rcx
#define d %rdx
#define i %r8
#define j %r10

#define dshort %edx

S2N_BN_SYMBOL(bignum_normalize):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Initialize shift count r = 0 and i = k - 1 but return immediately if k = 0.
// Otherwise load top digit c, but then if k = 1 skip the digitwise part

        xorq    r, r
        movq    k, i
        subq    $1, i
        jc      Lbignum_normalize_end
        movq    (z,i,8), c
        jz      Lbignum_normalize_bitpart

// Do a rather stupid but constant-time digit normalization, conditionally
// shifting left (k-1) times based on whether the top word is zero.
// With careful binary striding this could be O(k*log(k)) instead of O(k^2)
// while still retaining the constant-time style.

Lbignum_normalize_normloop:
        xorq    j, j
        movq    k, b
        movq    r, d
        incq    r
        negq    c
        cmovneq d, r
        movl    $0, dshort
Lbignum_normalize_shufloop:
        movq    d, c
        movq    (z,j,8), d
        cmovcq  d, c
        movq    c, (z,j,8)
        incq    j
        decq    b
        jnz     Lbignum_normalize_shufloop
        decq    i
        jnz     Lbignum_normalize_normloop

// We now have the top digit nonzero, assuming the input was nonzero,
// and as per the invariant of the loop above, c holds that digit. So
// now just count c's leading zeros and shift z bitwise that many bits.
// We need to patch the bsr result for the undefined case of zero input

Lbignum_normalize_bitpart:
        movl    $127, dshort
        bsrq    c, c
        cmovzq  d, c
        xorq    $63, c

        shlq    $6, r
        addq    c, r

        xorq    b, b
        xorq    i, i
Lbignum_normalize_bitloop:
        movq    (z,i,8), d
        movq    d, j
        shldq   %cl, b, d
        movq    d, (z,i,8)
        movq    j, b
        incq    i
        cmpq    k, i
        jc      Lbignum_normalize_bitloop

Lbignum_normalize_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_normalize)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
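A C model of the same observable behavior (invented name ref_normalize); unlike the assembly it branches on the data, so it is illustrative only, not constant-time:

#include <stdint.h>

uint64_t ref_normalize(uint64_t k, uint64_t *z)
{
  if (k == 0) return 0;
  uint64_t shifted = 0;
  while (shifted < 64 * k && z[k-1] == 0)   // whole-word shifts first
  { for (uint64_t i = k - 1; i > 0; i--) z[i] = z[i-1];
    z[0] = 0;
    shifted += 64;
  }
  if (shifted == 64 * k) return shifted;    // input was zero
  while (!(z[k-1] >> 63))                   // then single-bit shifts
  { uint64_t b = 0;
    for (uint64_t i = 0; i < k; i++)
    { uint64_t t = z[i];
      z[i] = (t << 1) | b;
      b = t >> 63;
    }
    shifted++;
  }
  return shifted;
}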
wlsfx/bnbb
2,239
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_modsub.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Subtract modulo m, z := (x - y) mod m, assuming x and y reduced
// Inputs x[k], y[k], m[k]; output z[k]
//
//    extern void bignum_modsub(uint64_t k, uint64_t *z, const uint64_t *x,
//                              const uint64_t *y, const uint64_t *m);
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = y, R8 = m
// Microsoft x64 ABI:   RCX = k, RDX = z, R8 = x, R9 = y, [RSP+40] = m
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_modsub)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_modsub)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_modsub)
        .text

#define k %rdi
#define z %rsi
#define x %rdx
#define y %rcx
#define m %r8
#define i %r9
#define j %r10
#define a %rax
#define c %r11

S2N_BN_SYMBOL(bignum_modsub):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

// If k = 0 do nothing

        testq   k, k
        jz      Lbignum_modsub_end

// Subtract z := x - y and record a mask for the carry x - y < 0

        xorq    c, c
        movq    k, j
        xorq    i, i
Lbignum_modsub_subloop:
        movq    (x,i,8), a
        sbbq    (y,i,8), a
        movq    a, (z,i,8)
        incq    i
        decq    j
        jnz     Lbignum_modsub_subloop
        sbbq    c, c

// Now do a masked addition z := z + [c] * m

        xorq    i, i
Lbignum_modsub_addloop:
        movq    (m,i,8), a
        andq    c, a
        negq    j
        adcq    a, (z,i,8)
        sbbq    j, j
        incq    i
        cmpq    k, i
        jc      Lbignum_modsub_addloop

Lbignum_modsub_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_modsub)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
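The subtract-then-masked-add-back pattern in portable C, as an editorial sketch (invented name ref_modsub, x and y assumed reduced mod m):

#include <stdint.h>

void ref_modsub(uint64_t k, uint64_t *z, const uint64_t *x,
                const uint64_t *y, const uint64_t *m)
{
  uint64_t b = 0;                            // borrow from x - y
  for (uint64_t i = 0; i < k; i++)
  { unsigned __int128 t = (unsigned __int128)x[i] - y[i] - b;
    z[i] = (uint64_t)t;
    b = (uint64_t)(t >> 64) & 1;
  }
  uint64_t mask = -b;                        // all-ones iff x < y
  uint64_t c = 0;
  for (uint64_t i = 0; i < k; i++)           // masked addition of m
  { unsigned __int128 t = (unsigned __int128)z[i] + (m[i] & mask) + c;
    z[i] = (uint64_t)t;
    c = (uint64_t)(t >> 64);
  }
}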
wlsfx/bnbb
14,154
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_coprime.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Test bignums for coprimality, gcd(x,y) = 1
// Inputs x[m], y[n]; output function return; temporary buffer t[>=2*max(m,n)]
//
//    extern uint64_t bignum_coprime(uint64_t m, const uint64_t *x, uint64_t n,
//                                   const uint64_t *y, uint64_t *t);
//
// Test for whether two bignums are coprime (no common factor besides 1).
// This is equivalent to testing if their gcd is 1, but a bit faster than
// doing those two computations separately.
//
// Here bignum x is m digits long, y is n digits long and the temporary
// buffer t needs to be 2 * max(m,n) digits long. The return value is
// 1 if coprime(x,y) and 0 otherwise.
//
// Standard x86-64 ABI: RDI = m, RSI = x, RDX = n, RCX = y, R8 = t, returns RAX
// Microsoft x64 ABI:   RCX = m, RDX = x, R8 = n, R9 = y, [RSP+40] = t, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_coprime)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_coprime)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_coprime)
        .text

// We get CHUNKSIZE bits per outer iteration, 64 minus a bit for proxy errors

#define CHUNKSIZE 58

// These variables are so fundamental we keep them consistently in registers.
// m is in fact the temporary buffer argument w so use the same register

#define m %r8
#define n %r15
#define k %r14
#define l %r13

// These are kept on the stack since there aren't enough registers

#define mat_mm (%rsp)
#define mat_mn 8(%rsp)
#define mat_nm 16(%rsp)
#define mat_nn 24(%rsp)
#define t 32(%rsp)
#define evenor 40(%rsp)

#define STACKVARSIZE 48

// These are shorthands for common temporary registers

#define a %rax
#define b %rbx
#define c %rcx
#define d %rdx
#define i %r9

// Temporaries for the top proxy selection part

#define c1 %r10
#define c2 %r11
#define h1 %r12
#define h2 %rbp
#define l1 %rdi
#define l2 %rsi

// Re-use for the actual proxies; m_hi = h1 and n_hi = h2 are assumed

#define m_hi %r12
#define n_hi %rbp
#define m_lo %rdi
#define n_lo %rsi

// Re-use for the matrix entries in the inner loop, though they
// get spilled to the corresponding memory locations mat_...

#define m_m %r10
#define m_n %r11
#define n_m %rcx
#define n_n %rdx

#define ishort %r9d
#define m_mshort %r10d
#define m_nshort %r11d
#define n_mshort %ecx
#define n_nshort %edx

// Because they are so unmemorable

#define arg1 %rdi
#define arg2 %rsi
#define arg3 %rdx
#define arg4 %rcx

S2N_BN_SYMBOL(bignum_coprime):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        movq    %r9, %rcx
        movq    56(%rsp), %r8
#endif

// Save all required registers and make room on stack for all the above vars

        CFI_PUSH(%rbp)
        CFI_PUSH(%rbx)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)
        CFI_PUSH(%r15)
        CFI_DEC_RSP(STACKVARSIZE)

// Compute k = max(m,n), and if this is zero skip to the end. Note that
// in this case k is also in %rax so serves as the right answer of "false"

        movq    arg1, %rax
        cmpq    arg3, %rax
        cmovcq  arg3, %rax
        movq    %rax, k
        testq   %rax, %rax
        jz      Lbignum_coprime_end

// Set up inside w two size-k buffers m and n

        leaq    (m,k,8), n

// Copy the input x into the buffer m, padding with zeros as needed

        xorq    i, i
        testq   arg1, arg1
        jz      Lbignum_coprime_xpadloop
Lbignum_coprime_xloop:
        movq    (arg2,i,8), a
        movq    a, (m,i,8)
        incq    i
        cmpq    arg1, i
        jc      Lbignum_coprime_xloop
        cmpq    k, i
        jnc     Lbignum_coprime_xskip
Lbignum_coprime_xpadloop:
        movq    $0, (m,i,8)
        incq    i
        cmpq    k, i
        jc      Lbignum_coprime_xpadloop
Lbignum_coprime_xskip:

// Copy the input y into the buffer n, padding with zeros as needed

        xorq    i, i
        testq   arg3, arg3
        jz      Lbignum_coprime_ypadloop
Lbignum_coprime_yloop:
        movq    (arg4,i,8), a
        movq    a, (n,i,8)
        incq    i
        cmpq    arg3, i
        jc      Lbignum_coprime_yloop
        cmpq    k, i
        jnc     Lbignum_coprime_yskip
Lbignum_coprime_ypadloop:
        movq    $0, (n,i,8)
        incq    i
        cmpq    k, i
        jc      Lbignum_coprime_ypadloop
Lbignum_coprime_yskip:

// Set up the outer loop count of 64 * sum of input sizes.
// The invariant is that m * n < 2^t at all times.

        leaq    (arg1,arg3), a
        shlq    $6, a
        movq    a, t

// Record for the very end the OR of the lowest words.
// If the bottom bit is zero we know both are even so the answer is false.
// But since this is constant-time code we still execute all the main part.

        movq    (m), a
        movq    (n), b
        orq     b, a
        movq    a, evenor

// Now if n is even trigger a swap of m and n. This ensures that if
// one or other of m and n is odd then we make sure now that n is,
// as expected by our invariant later on.

        andq    $1, b
        subq    $1, b

        xorq    i, i
Lbignum_coprime_swaploop:
        movq    (m,i,8), a
        movq    (n,i,8), c
        movq    a, d
        xorq    c, d
        andq    b, d
        xorq    d, a
        xorq    d, c
        movq    a, (m,i,8)
        movq    c, (n,i,8)
        incq    i
        cmpq    k, i
        jnz     Lbignum_coprime_swaploop

// Start of the main outer loop iterated t / CHUNKSIZE times

Lbignum_coprime_outerloop:

// We need only bother with sharper l = min k (ceil(t/64)) digits
// Either both m and n fit in l digits, or m has become zero and so
// nothing happens in the loop anyway and this makes no difference.

        movq    t, l
        addq    $63, l
        shrq    $6, l
        cmpq    k, l
        cmovncq k, l

// Select upper and lower proxies for both m and n to drive the inner
// loop. The lower proxies are simply the lowest digits themselves,
// m_lo = m[0] and n_lo = n[0], while the upper proxies are bitfields
// of the two inputs selected so their top bit (63) aligns with the
// most significant bit of *either* of the two inputs.

        xorq    h1, h1  // Previous high and low for m
        xorq    l1, l1
        xorq    h2, h2  // Previous high and low for n
        xorq    l2, l2
        xorq    c2, c2  // Mask flag: previous word of one was nonzero
// and in this case h1 and h2 are those words

        xorq    i, i
Lbignum_coprime_toploop:
        movq    (m,i,8), b
        movq    (n,i,8), c
        movq    c2, c1
        andq    h1, c1
        andq    h2, c2
        movq    b, a
        orq     c, a
        negq    a
        cmovcq  c1, l1
        cmovcq  c2, l2
        cmovcq  b, h1
        cmovcq  c, h2
        sbbq    c2, c2
        incq    i
        cmpq    l, i
        jc      Lbignum_coprime_toploop

        movq    h1, a
        orq     h2, a
        bsrq    a, c
        xorq    $63, c
        shldq   %cl, l1, h1
        shldq   %cl, l2, h2

// m_lo = m[0], n_lo = n[0];

        movq    (m), %rax
        movq    %rax, m_lo

        movq    (n), %rax
        movq    %rax, n_lo

// Now the inner loop, with i as loop counter from CHUNKSIZE down.
// This records a matrix of updates to apply to the initial
// values of m and n with, at stage j:
//
//     sgn * m' = (m_m * m - m_n * n) / 2^j
//    -sgn * n' = (n_m * m - n_n * n) / 2^j
//
// where "sgn" is either +1 or -1, and we lose track of which except
// that both instances above are the same. This throwing away the sign
// costs nothing (since we have to correct in general anyway because
// of the proxied comparison) and makes things a bit simpler. But it
// is simply the parity of the number of times the first condition,
// used as the swapping criterion, fires in this loop.

        movl    $1, m_mshort
        movl    $0, m_nshort
        movl    $0, n_mshort
        movl    $1, n_nshort
        movl    $CHUNKSIZE, ishort

// Stash more variables over the inner loop to free up regs

        movq    k, mat_mn
        movq    l, mat_nm
        movq    m, mat_mm
        movq    n, mat_nn

// Conceptually in the inner loop we follow these steps:
//
// * If m_lo is odd and m_hi < n_hi, then swap the four pairs
//    (m_hi,n_hi); (m_lo,n_lo); (m_m,n_m); (m_n,n_n)
//
// * Now, if m_lo is odd (old or new, doesn't matter as initial n_lo is odd)
//    m_hi := m_hi - n_hi, m_lo := m_lo - n_lo
//    m_m := m_m + n_m, m_n := m_n + n_n
//
// * Halve and double them
//     m_hi := m_hi / 2, m_lo := m_lo / 2
//     n_m := n_m * 2, n_n := n_n * 2
//
// The actual computation computes updates before actually swapping and
// then corrects as needed.

Lbignum_coprime_innerloop:
        xorl    %eax, %eax
        xorl    %ebx, %ebx
        xorq    m, m
        xorq    n, n
        btq     $0, m_lo
        cmovcq  n_hi, %rax
        cmovcq  n_lo, %rbx
        cmovcq  n_m, m
        cmovcq  n_n, n
        movq    m_lo, l
        subq    %rbx, m_lo
        subq    l, %rbx
        movq    m_hi, k
        subq    %rax, k
        cmovcq  m_hi, n_hi
        leaq    -1(k), m_hi
        cmovcq  %rbx, m_lo
        cmovcq  l, n_lo
        notq    m_hi
        cmovcq  m_m, n_m
        cmovcq  m_n, n_n
        cmovncq k, m_hi
        shrq    $1, m_lo
        addq    m, m_m
        addq    n, m_n
        shrq    $1, m_hi
        addq    n_m, n_m
        addq    n_n, n_n

// End of the inner for-loop

        decq    i
        jnz     Lbignum_coprime_innerloop

// Unstash the temporary variables

        movq    mat_mn, k
        movq    mat_nm, l
        movq    mat_mm, m
        movq    mat_nn, n

// Put the matrix entries in memory since we're out of registers
// We pull them out repeatedly in the next loop

        movq    m_m, mat_mm
        movq    m_n, mat_mn
        movq    n_m, mat_nm
        movq    n_n, mat_nn

// Now actually compute the updates to m and n corresponding to that matrix,
// and correct the signs if they have gone negative. First we compute the
// (k+1)-sized updates with the following invariant (here h1 and h2 are in
// fact carry bitmasks, either 0 or -1):
//
//    h1::l1::m = m_m * m - m_n * n
//    h2::l2::n = n_m * m - n_n * n

        xorq    i, i
        xorq    h1, h1
        xorq    l1, l1
        xorq    h2, h2
        xorq    l2, l2
Lbignum_coprime_crossloop:
        movq    (m,i,8), c
        movq    mat_mm, a
        mulq    c
        addq    a, l1
        adcq    $0, d
        movq    d, c1  // Now c1::l1 is +ve part 1

        movq    mat_nm, a
        mulq    c
        addq    a, l2
        adcq    $0, d
        movq    d, c2  // Now c2::l2 is +ve part 2

        movq    (n,i,8), c
        movq    mat_mn, a
        mulq    c
        subq    h1, d  // Now d::a is -ve part 1

        subq    a, l1
        sbbq    d, c1
        sbbq    h1, h1
        movq    l1, (m,i,8)
        movq    c1, l1

        movq    mat_nn, a
        mulq    c
        subq    h2, d  // Now d::a is -ve part 2

        subq    a, l2
        sbbq    d, c2
        sbbq    h2, h2
        movq    l2, (n,i,8)
        movq    c2, l2

        incq    i
        cmpq    l, i
        jc      Lbignum_coprime_crossloop

// Now fix the signs of m and n if they have gone negative

        xorq    i, i
        movq    h1, c1  // carry-in coded up as well
        movq    h2, c2  // carry-in coded up as well
        xorq    h1, l1  // for the end digit
        xorq    h2, l2  // for the end digit
Lbignum_coprime_optnegloop:
        movq    (m,i,8), a
        xorq    h1, a
        negq    c1
        adcq    $0, a
        sbbq    c1, c1
        movq    a, (m,i,8)
        movq    (n,i,8), a
        xorq    h2, a
        negq    c2
        adcq    $0, a
        sbbq    c2, c2
        movq    a, (n,i,8)
        incq    i
        cmpq    l, i
        jc      Lbignum_coprime_optnegloop
        subq    c1, l1
        subq    c2, l2

// Now shift them right CHUNKSIZE bits

        movq    l, i
Lbignum_coprime_shiftloop:
        movq    -8(m,i,8), a
        movq    a, h1
        shrdq   $CHUNKSIZE, l1, a
        movq    a, -8(m,i,8)
        movq    h1, l1
        movq    -8(n,i,8), a
        movq    a, h2
        shrdq   $CHUNKSIZE, l2, a
        movq    a, -8(n,i,8)
        movq    h2, l2
        decq    i
        jnz     Lbignum_coprime_shiftloop

// End of main loop. We can stop if t' <= 0 since then m * n < 2^0, which
// since n is odd (in the main cases where we had one or other input odd)
// means that m = 0 and n is the final gcd. Moreover we do in fact need to
// maintain strictly t > 0 in the main loop, or the computation of the
// optimized digit bound l could collapse to 0.

        subq    $CHUNKSIZE, t
        jnbe    Lbignum_coprime_outerloop

// Now compare n with 1 (OR of the XORs in a)

        movq    (n), a
        xorq    $1, a
        cmpq    $1, k
        jz      Lbignum_coprime_finalcomb
        movl    $1, ishort
Lbignum_coprime_compareloop:
        orq     (n,i,8), a
        incq    i
        cmpq    k, i
        jc      Lbignum_coprime_compareloop

// Now combine that with original "evenor" oddness flag
// The final condition is lsb(evenor) = 1 AND a = 0

Lbignum_coprime_finalcomb:
        negq    a
        sbbq    a, a
        incq    a
        andq    evenor, a

// The end

Lbignum_coprime_end:
        CFI_INC_RSP(STACKVARSIZE)
        CFI_POP(%r15)
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbx)
        CFI_POP(%rbp)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_coprime)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
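The even/odd co-reduction that drives the loop is easiest to see in its single-word analogue, sketched here as a plain (branching, non-constant-time) binary gcd with the invented name ref_coprime1; the assembly runs the same idea on full bignums, batching CHUNKSIZE halving steps into a 2x2 matrix that is then applied in the cross loop:

#include <stdint.h>

uint64_t ref_coprime1(uint64_t x, uint64_t y)
{
  if ((x | y) == 0) return 0;                // gcd(0,0) = 0
  if (((x | y) & 1) == 0) return 0;          // both even: common factor 2
  if ((y & 1) == 0) { uint64_t t = x; x = y; y = t; }  // keep y odd
  while (x != 0)
  { if ((x & 1) == 0) x >>= 1;               // halve an even x
    else
    { if (x < y) { uint64_t t = x; x = y; y = t; }     // swap so x >= y
      x = (x - y) >> 1;                      // both odd: subtract, halve
    }
  }
  return y == 1;                             // y is now the gcd
}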
wlsfx/bnbb
5,977
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_amontredc.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Almost-Montgomery reduce, z :== (x' / 2^{64p}) (congruent mod m)
// Inputs x[n], m[k], p; output z[k]
//
//    extern void bignum_amontredc(uint64_t k, uint64_t *z, uint64_t n,
//                                 const uint64_t *x, const uint64_t *m, uint64_t p);
//
// Does z :== (x' / 2^{64p}) mod m where x' = x if n <= p + k and in general
// is the lowest (p+k) digits of x. That is, p-fold almost-Montgomery reduction
// w.r.t. a k-digit modulus m giving a k-digit answer.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x, R8 = m, R9 = p
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = n, R9 = x, [RSP+40] = m, [RSP+48] = p
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_amontredc)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_amontredc)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_amontredc)
.text

// We copy x into %r10 but it comes in in %rdx originally

#define k %rdi
#define z %rsi
#define n %r10
#define x %rcx
#define m %r8
#define p %r9

// General temp, low part of product and mul input
#define a %rax
// General temp, High part of product
#define b %rdx
// Negated modular inverse
#define w (%rsp)
// Inner loop counter
#define j %rbx
// Home for i'th digit or Montgomery multiplier
#define d %rbp

#define h %r11
#define e %r12
#define t %r13
#define i %r14
#define c %r15

// Some more intuitive names for temp regs in initial word-level negmodinv.

#define t1 %rbx
#define t2 %r14

#define ashort %eax
#define cshort %r15d
#define jshort %ebx

S2N_BN_SYMBOL(bignum_amontredc):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
movq 56(%rsp), %r8
movq 64(%rsp), %r9
#endif

// Save registers and allocate space on stack for non-register variable w

CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(8)

// If k = 0 the whole operation is trivial

testq k, k
jz Lbignum_amontredc_end

// Move n input into its permanent home, since we need %rdx for multiplications

movq %rdx, n

// Compute word-level negated modular inverse w for m == m[0].

movq (m), a
movq a, t2
movq a, t1
shlq $2, t2
subq t2, t1
xorq $2, t1
movq t1, t2
imulq a, t2
movl $2, ashort
addq t2, a
addq $1, t2
imulq a, t1
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, t1
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, t1
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, t1
movq t1, w

// Initialize z to the lowest k digits of the input, zero-padding if n < k.

movq k, j
cmpq k, n
cmovcq n, j
xorq i, i
testq j, j
jz Lbignum_amontredc_padloop
Lbignum_amontredc_copyloop:
movq (x,i,8), a
movq a, (z,i,8)
incq i
cmpq j, i
jc Lbignum_amontredc_copyloop
cmpq k, i
jnc Lbignum_amontredc_initialized
xorq j, j
Lbignum_amontredc_padloop:
movq j, (z,i,8)
incq i
cmpq k, i
jc Lbignum_amontredc_padloop
Lbignum_amontredc_initialized:
xorq c, c

// Now if p = 0 that's the end of the operation

testq p, p
jz Lbignum_amontredc_end

// Outer loop, just doing a standard Montgomery reduction on z

xorq i, i
Lbignum_amontredc_outerloop:
movq (z), e
movq w, d
imulq e, d
movq (m), a
mulq d
addq e, a // Will be zero but want the carry
movq %rdx, h
movl $1, jshort
movq k, t
decq t
jz Lbignum_amontredc_montend
Lbignum_amontredc_montloop:
adcq (z,j,8), h
sbbq e, e
movq (m,j,8), a
mulq d
subq e, %rdx
addq h, a
movq a, -8(z,j,8)
movq %rdx, h
incq j
decq t
jnz Lbignum_amontredc_montloop
Lbignum_amontredc_montend:
adcq c, h
movl $0, cshort
adcq $0, c
addq i, j
cmpq n, j
jnc Lbignum_amontredc_offtheend
movq (x,j,8), a
addq a, h
adcq $0, c
Lbignum_amontredc_offtheend:
movq h, -8(z,k,8)

// End of outer loop.

incq i
cmpq p, i
jc Lbignum_amontredc_outerloop

// Now convert carry word, which is always in {0,1}, into a mask "d"
// and do a masked subtraction of m for the final almost-Montgomery result.

xorq d, d
subq c, d
xorq e, e
xorq j, j
Lbignum_amontredc_corrloop:
movq (m,j,8), a
andq d, a
negq e
sbbq a, (z,j,8)
sbbq e, e
incq j
cmpq k, j
jc Lbignum_amontredc_corrloop

Lbignum_amontredc_end:
CFI_INC_RSP(8)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_amontredc)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
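The inline negmodinv block near the top of bignum_amontredc (repeated in several files below) is a Newton/Hensel iteration. A C model of what it computes, assuming m[0] is odd as Montgomery reduction requires; word_negmodinv is an illustrative name:

    #include <stdint.h>

    // For odd m, returns w with m*w + 1 == 0 (mod 2^64). The seed is correct
    // modulo 2^5 and each update squares the error term, so the number of
    // correct low-order bits goes 5 -> 10 -> 20 -> 40 -> 80 >= 64.
    static uint64_t word_negmodinv(uint64_t m) {
        uint64_t w = (m - (m << 2)) ^ 2;  // seed: m*w == -1 (mod 2^5)
        uint64_t e = m * w + 1;           // error term: m*w == -1 + e
        w *= e + 1; e *= e;               // now m*w == -1 + e^2
        w *= e + 1; e *= e;
        w *= e + 1; e *= e;
        w *= e + 1;
        return w;
    }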
wlsfx/bnbb
4,063
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_add.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Add, z := x + y
// Inputs x[m], y[n]; outputs function return (carry-out) and z[p]
//
//    extern uint64_t bignum_add(uint64_t p, uint64_t *z, uint64_t m,
//                               const uint64_t *x, uint64_t n, const uint64_t *y);
//
// Does the z := x + y operation, truncating modulo p words in general and
// returning a top carry (0 or 1) in the p'th place, only adding the input
// words below p (as well as m and n respectively) to get the sum and carry.
//
// Standard x86-64 ABI: RDI = p, RSI = z, RDX = m, RCX = x, R8 = n, R9 = y, returns RAX
// Microsoft x64 ABI: RCX = p, RDX = z, R8 = m, R9 = x, [RSP+40] = n, [RSP+48] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add)
.text

#define p %rdi
#define z %rsi
#define m %rdx
#define x %rcx
#define n %r8
#define y %r9
#define i %r10
#define a %rax

#define ashort %eax

S2N_BN_SYMBOL(bignum_add):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
movq 56(%rsp), %r8
movq 64(%rsp), %r9
#endif

// Zero the main index counter for both branches

xorq i, i

// First clamp the two input sizes m := min(p,m) and n := min(p,n) since
// we'll never need words past the p'th. Can now assume m <= p and n <= p.
// Then compare the modified m and n and branch accordingly

cmpq m, p
cmovcq p, m
cmpq n, p
cmovcq p, n
cmpq n, m
jc Lbignum_add_ylonger

// The case where x is longer or of the same size (p >= m >= n)

subq m, p
subq n, m
incq m
testq n, n
jz Lbignum_add_xtest
Lbignum_add_xmainloop:
movq (x,i,8), a
adcq (y,i,8), a
movq a, (z,i,8)
incq i
decq n
jnz Lbignum_add_xmainloop
jmp Lbignum_add_xtest
Lbignum_add_xtoploop:
movq (x,i,8), a
adcq $0, a
movq a, (z,i,8)
incq i
Lbignum_add_xtest:
decq m
jnz Lbignum_add_xtoploop
movl $0, ashort
adcq $0, a
testq p, p
jnz Lbignum_add_tails
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
ret

// The case where y is longer (p >= n > m)

#if WINDOWS_ABI
.cfi_def_cfa %rsp, 24
#else
.cfi_def_cfa %rsp, 8
#endif
Lbignum_add_ylonger:
subq n, p
subq m, n
testq m, m
jz Lbignum_add_ytoploop
Lbignum_add_ymainloop:
movq (x,i,8), a
adcq (y,i,8), a
movq a, (z,i,8)
incq i
decq m
jnz Lbignum_add_ymainloop
Lbignum_add_ytoploop:
movq (y,i,8), a
adcq $0, a
movq a, (z,i,8)
incq i
decq n
jnz Lbignum_add_ytoploop
movl $0, ashort
adcq $0, a
testq p, p
jnz Lbignum_add_tails
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
ret

// Adding a non-trivial tail, when p > max(m,n)

Lbignum_add_tails:
movq a, (z,i,8)
xorq a, a
jmp Lbignum_add_tail
Lbignum_add_tailloop:
movq a, (z,i,8)
Lbignum_add_tail:
incq i
decq p
jnz Lbignum_add_tailloop
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_add)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
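As a cross-check of the clamping and carry behaviour described in the header, here is a plain C model of the same contract. This is a sketch using the unsigned __int128 GCC/Clang extension, not a transliteration of the code above:

    #include <stdint.h>

    static uint64_t bignum_add_model(uint64_t p, uint64_t *z,
                                     uint64_t m, const uint64_t *x,
                                     uint64_t n, const uint64_t *y) {
        if (m > p) m = p;              // words past the p'th never matter
        if (n > p) n = p;
        uint64_t c = 0;
        for (uint64_t i = 0; i < p; i++) {
            unsigned __int128 t = (unsigned __int128)c
                                + (i < m ? x[i] : 0)
                                + (i < n ? y[i] : 0);
            z[i] = (uint64_t)t;
            c = (uint64_t)(t >> 64);
        }
        return c;                      // the top carry in the p'th place
    }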
wlsfx/bnbb
3,960
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_recip.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Single-word reciprocal, underestimate of 2^128 / a with implicit 1 added
// Input a; output function return
//
//    extern uint64_t word_recip(uint64_t a);
//
// Given an input word "a" with its top bit set (i.e. 2^63 <= a < 2^64), the
// result "x" is implicitly augmented with a leading 1 giving x' = 2^64 + x.
// The result is x' = ceil(2^128 / a) - 1, which except for the single
// special case a = 2^63 is the same thing as x' = floor(2^128 / a).
//
// Standard x86-64 ABI: RDI = a, returns RAX
// Microsoft x64 ABI: RCX = a, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_recip)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_recip)
S2N_BN_SYM_PRIVACY_DIRECTIVE(word_recip)
.text

#define a %rdi
#define x %rcx
#define b %rsi

// Some aliasing here

#define t %rax
#define l %rax
#define d %rdx
#define h %rdx

S2N_BN_SYMBOL(word_recip):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
#endif

// Scale the input down: b overestimates a/2^16 with b <= 2^48 and
// x underestimates 2^64/b with b * x =~= 2^64, accurate to ~2 bits.

movq a, b
movq $0x1FFFFFFFFFFFF, x
shrq $16, b
xorq b, x
incq b
shrq $32, x

// Suppose x = 2^64/b * (1 - e) and get scaled error d = 2^64 * e

movq b, d
imulq x, d
negq d

// Rescale to give c = 2^15 * e (so c <= 2^13) and compute
// e + e^2 + e^3 + e^4 = (1 + e^2) (e + e^2)
//                     = (2^30 + c^2) * (2^15 * c + c^2) / 2^60
// and then x * (1 + e + e^2 + e^3 + e^4)
//        = (2^30 * x + x * (2^30 + c^2) * (2^30 * c + c^2) / 2^30) / 2^30

movq d, t
shrq $49, t
imulq t, t
shrq $34, d
addq t, d
orq $0x40000000, t
imulq d, t
shrq $30, t
imulq x, t
shlq $30, x
addq t, x
shrq $30, x

// Now b * x =~= 2^64, accurate to ~10 bits.
// Do a 64-bit Newton step, scaling up x by 16 bits in the process.

movq b, d
imulq x, d
negq d
shrq $24, d
imulq x, d
shlq $16, x
shrq $24, d
addq d, x

// Now b * x =~= 2^80, accurate to ~20 bits.
// Do a 64-bit Newton step, scaling up x by 31 bits in the process

movq b, d
imulq x, d
negq d
shrq $32, d
imulq x, d
shlq $31, x
shrq $17, d
addq d, x

// Now a * x =~= 2^127, accurate to ~40 bits. Do a Newton step at full size.
// Instead of literally negating the product (h,l) we complement bits in
// the extracted bitfield, which is close enough and a bit faster.
// At the end we also shift x one more bit left, losing the known-1 top bit
// so that a * (2^64 + x) =~= 2^128.

movq a, l
mulq x
shrdq $60, h, l
movq x, h
shrq $33, h
notq l
imulq h, l
shlq $1, x
shrq $33, l
addq l, x

// Test if (x' + 1) * a < 2^128 where x' = 2^64 + x, catching the special
// case where x + 1 would wrap, corresponding to input a = 2^63.

addq $1, x
movq a, l
sbbq $0, x
mulq x
movq x, %rax
addq a, h

// Select either x or x + 1 accordingly as the final answer

sbbq $0, %rax

#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(word_recip)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
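The chain of Newton steps above is hard to eyeball, but the specification itself is one line of 128-bit arithmetic. A reference model (a sketch leaning on the unsigned __int128 extension, and on the identity ceil(2^128/a) - 1 = floor((2^128 - 1)/a)):

    #include <stdint.h>

    // For 2^63 <= a < 2^64, the routine's result satisfies
    // 2^64 + word_recip(a) == ceil(2^128 / a) - 1.
    static uint64_t word_recip_model(uint64_t a) {
        unsigned __int128 q = ~(unsigned __int128)0 / a;  // (2^128 - 1) / a
        return (uint64_t)(q - ((unsigned __int128)1 << 64));
    }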
wlsfx/bnbb
5,773
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_amontsqr.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Almost-Montgomery square, z :== (x^2 / 2^{64k}) (congruent mod m)
// Inputs x[k], m[k]; output z[k]
//
//    extern void bignum_amontsqr(uint64_t k, uint64_t *z, const uint64_t *x,
//                                const uint64_t *m);
//
// Does z :== (x^2 / 2^{64k}) mod m, meaning that the result, in the native
// size k, is congruent modulo m, but might not be fully reduced mod m. This
// is why it is called *almost* Montgomery squaring.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = m
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = x, R9 = m
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_amontsqr)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_amontsqr)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_amontsqr)
.text

// We copy x into %r9 but it comes in in %rdx originally

#define k %rdi
#define z %rsi
#define x %r9
#define m %rcx

// General temp, low part of product and mul input
#define a %rax
// General temp, High part of product
#define b %rdx
// Negated modular inverse
#define w %r8
// Inner loop counter
#define j %rbx
// Home for i'th digit or Montgomery multiplier
#define d %rbp

#define h %r10
#define e %r11
#define n %r12
#define i %r13
#define c0 %r14
#define c1 %r15

// A temp reg in the initial word-level negmodinv.

#define t2 %rdx

#define ashort %eax
#define jshort %ebx

S2N_BN_SYMBOL(bignum_amontsqr):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif

// Save registers

CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)

// If k = 0 the whole operation is trivial

testq k, k
jz Lbignum_amontsqr_end

// Move x input into its permanent home, since we need %rdx for multiplications

movq %rdx, x

// Compute word-level negated modular inverse w for m == m[0].

movq (m), a
movq a, t2
movq a, w
shlq $2, t2
subq t2, w
xorq $2, w
movq w, t2
imulq a, t2
movl $2, ashort
addq t2, a
addq $1, t2
imulq a, w
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, w
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, w
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, w

// Initialize the output c0::z to zero so we can then consistently add rows.
// It would be a bit more efficient to special-case the zeroth row, but
// this keeps the code slightly simpler.

xorq i, i // Also initializes i for main loop
xorq j, j
Lbignum_amontsqr_zoop:
movq i, (z,j,8)
incq j
cmpq k, j
jc Lbignum_amontsqr_zoop
xorq c0, c0

// Outer loop pulling down digits d=x[i], multiplying by x and reducing

Lbignum_amontsqr_outerloop:

// Multiply-add loop where we always have CF + previous high part h to add in.
// Note that in general we do need yet one more carry in this phase and hence
// initialize c1 with the top carry.

movq (x,i,8), d
xorq j, j
xorq h, h
xorq c1, c1
movq k, n
Lbignum_amontsqr_maddloop:
adcq (z,j,8), h
sbbq e, e
movq (x,j,8), a
mulq d
subq e, %rdx
addq h, a
movq a, (z,j,8)
movq %rdx, h
incq j
decq n
jnz Lbignum_amontsqr_maddloop
adcq h, c0
adcq c1, c1

// Montgomery reduction loop, similar but offsetting writebacks

movq (z), e
movq w, d
imulq e, d
movq (m), a
mulq d
addq e, a // Will be zero but want the carry
movq %rdx, h
movl $1, jshort
movq k, n
decq n
jz Lbignum_amontsqr_montend
Lbignum_amontsqr_montloop:
adcq (z,j,8), h
sbbq e, e
movq (m,j,8), a
mulq d
subq e, %rdx
addq h, a
movq a, -8(z,j,8)
movq %rdx, h
incq j
decq n
jnz Lbignum_amontsqr_montloop
Lbignum_amontsqr_montend:
adcq c0, h
adcq $0, c1
movq c1, c0
movq h, -8(z,j,8)

// End of outer loop.

incq i
cmpq k, i
jc Lbignum_amontsqr_outerloop

// Now convert carry word, which is always in {0,1}, into a mask "d"
// and do a masked subtraction of m for the final almost-Montgomery result.

xorq d, d
subq c0, d
xorq e, e
xorq j, j
Lbignum_amontsqr_corrloop:
movq (m,j,8), a
andq d, a
negq e
sbbq a, (z,j,8)
sbbq e, e
incq j
cmpq k, j
jc Lbignum_amontsqr_corrloop

Lbignum_amontsqr_end:
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_amontsqr)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
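The SBB-mask dance in the multiply-add loop (saving CF into e, then folding it into the high product word with subq e, %rdx) exists only because mulq destroys the flags. The net effect of one row is easy to state in C; a sketch under the unsigned __int128 assumption, with c1:c0 the two top carry words named in the comments:

    #include <stdint.h>

    // z[0..k) += d * x[0..k), accumulating overflow into the carry pipe c1:c0.
    static void madd_row_model(uint64_t k, uint64_t *z, const uint64_t *x,
                               uint64_t d, uint64_t *c0, uint64_t *c1) {
        uint64_t h = 0;                        // running high word
        for (uint64_t j = 0; j < k; j++) {
            unsigned __int128 t = (unsigned __int128)x[j] * d + z[j] + h;
            z[j] = (uint64_t)t;
            h = (uint64_t)(t >> 64);
        }
        unsigned __int128 top = (unsigned __int128)*c0 + h;
        *c0 = (uint64_t)top;
        *c1 += (uint64_t)(top >> 64);          // the "yet one more carry"
    }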
wlsfx/bnbb
3,006
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_shr_small.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Shift bignum right by c < 64 bits z := floor(x / 2^c)
// Inputs x[n], c; outputs function return (bits shifted out) and z[k]
//
//    extern uint64_t bignum_shr_small(uint64_t k, uint64_t *z, uint64_t n,
//                                     const uint64_t *x, uint64_t c);
//
// Does the "z := x >> c" operation where x is n digits, result z is p.
// The shift count c is masked to 6 bits so it actually uses c' = c mod 64.
// The return value is the input mod 2^c'.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x, R8 = c, returns RAX
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = n, R9 = x, [RSP+40] = c, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_shr_small)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_shr_small)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_shr_small)
.text

#define p %rdi
#define z %rsi
#define n %rdx

// These get moved from their initial positions

#define c %rcx
#define x %r9

// Other variables

#define b %rax
#define t %r8
#define a %r10

#define ashort %r10d

S2N_BN_SYMBOL(bignum_shr_small):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
movq 56(%rsp), %r8
#endif

// Reshuffle registers to put the shift count into CL

movq %rcx, x
movq %r8, c

// Set default carry-in word to 0, useful for other things too

xorq b, b

// First, if p > n then pad output on the left with p-n zeros

cmpq p, n
jnc Lbignum_shr_small_nopad
Lbignum_shr_small_padloop:
decq p
movq b, (z,p,8)
cmpq p, n
jc Lbignum_shr_small_padloop
Lbignum_shr_small_nopad:

// We now know that p <= n. If in fact p < n let carry word = x[p] instead of 0

jz Lbignum_shr_small_shiftstart
movq (x,p,8), b
Lbignum_shr_small_shiftstart:
testq p, p
jz Lbignum_shr_small_trivial

// Now the main loop

Lbignum_shr_small_loop:
movq -8(x,p,8), a
movq a, t
shrdq %cl, b, a
movq a, -8(z,p,8)
movq t, b
decq p
jnz Lbignum_shr_small_loop

// Mask the carry word and return with that as RAX = b

Lbignum_shr_small_trivial:
movl $1, ashort
shlq %cl, a
decq a
andq a, b

Lbignum_shr_small_end:
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_shr_small)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
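A C model of the contract, padding on the left when k > n and returning the shifted-out low bits. This is a sketch; it uses unsigned __int128 so the c' = 0 case needs no special-casing the way a 64-bit double shift would:

    #include <stdint.h>

    static uint64_t bignum_shr_small_model(uint64_t k, uint64_t *z, uint64_t n,
                                           const uint64_t *x, uint64_t c) {
        c &= 63;                               // c' = c mod 64
        uint64_t b = (k < n) ? x[k] : 0;       // incoming high word
        for (uint64_t i = k; i-- > 0; ) {
            uint64_t d = (i < n) ? x[i] : 0;
            unsigned __int128 t = ((unsigned __int128)b << 64) | d;
            z[i] = (uint64_t)(t >> c);         // like shrdq %cl, b, d
            b = d;
        }
        return b & (((uint64_t)1 << c) - 1);   // the input mod 2^c'
    }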
wlsfx/bnbb
6,295
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_amontmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Almost-Montgomery multiply, z :== (x * y / 2^{64k}) (congruent mod m)
// Inputs x[k], y[k], m[k]; output z[k]
//
//    extern void bignum_amontmul(uint64_t k, uint64_t *z, const uint64_t *x,
//                                const uint64_t *y, const uint64_t *m);
//
// Does z :== (x * y / 2^{64k}) mod m, meaning that the result, in the native
// size k, is congruent modulo m, but might not be fully reduced mod m. This
// is why it is called *almost* Montgomery multiplication.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = y, R8 = m
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = x, R9 = y, [RSP+40] = m
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_amontmul)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_amontmul)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_amontmul)
.text

// We copy x into %r9 but it comes in in %rdx originally

#define k %rdi
#define z %rsi
#define x %r9
#define y %rcx
#define m %r8

// General temp, low part of product and mul input
#define a %rax
// General temp, High part of product
#define b %rdx
// Inner loop counter
#define j %rbx
// Home for i'th digit or Montgomery multiplier
#define d %rbp

#define h %r10
#define e %r11
#define n %r12
#define i %r13
#define c0 %r14
#define c1 %r15

// This one variable we store on the stack as we are a register short.
// At least it's only used once per iteration of the outer loop (k times)
// and with a single read each time, after one initial write. The variable
// is the word-level negated modular inverse

#define w (%rsp)

// Some more intuitive names for temp regs in initial word-level negmodinv.

#define t1 %rbx
#define t2 %rdx

#define ashort %eax
#define jshort %ebx

S2N_BN_SYMBOL(bignum_amontmul):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
movq 56(%rsp), %r8
#endif

// Save registers and allocate space on stack for non-register variable w

CFI_PUSH(%rbx)
CFI_PUSH(%rbp)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)
CFI_PUSH(%r15)
CFI_DEC_RSP(8)

// If k = 0 the whole operation is trivial

testq k, k
jz Lbignum_amontmul_end

// Move x input into its permanent home, since we need %rdx for multiplications

movq %rdx, x

// Compute word-level negated modular inverse w for m == m[0].

movq (m), a
movq a, t2
movq a, t1
shlq $2, t2
subq t2, t1
xorq $2, t1
movq t1, t2
imulq a, t2
movl $2, ashort
addq t2, a
addq $1, t2
imulq a, t1
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, t1
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, t1
imulq t2, t2
movl $1, ashort
addq t2, a
imulq a, t1
movq t1, w

// Initialize the output c0::z to zero so we can then consistently add rows.
// It would be a bit more efficient to special-case the zeroth row, but
// this keeps the code slightly simpler.

xorq i, i // Also initializes i for main loop
xorq j, j
Lbignum_amontmul_zoop:
movq i, (z,j,8)
incq j
cmpq k, j
jc Lbignum_amontmul_zoop
xorq c0, c0

// Outer loop pulling down digits d=x[i], multiplying by y and reducing

Lbignum_amontmul_outerloop:

// Multiply-add loop where we always have CF + previous high part h to add in.
// Note that in general we do need yet one more carry in this phase and hence
// initialize c1 with the top carry.

movq (x,i,8), d
xorq j, j
xorq h, h
xorq c1, c1
movq k, n
Lbignum_amontmul_maddloop:
adcq (z,j,8), h
sbbq e, e
movq (y,j,8), a
mulq d
subq e, %rdx
addq h, a
movq a, (z,j,8)
movq %rdx, h
incq j
decq n
jnz Lbignum_amontmul_maddloop
adcq h, c0
adcq c1, c1

// Montgomery reduction loop, similar but offsetting writebacks

movq (z), e
movq w, d
imulq e, d
movq (m), a
mulq d
addq e, a // Will be zero but want the carry
movq %rdx, h
movl $1, jshort
movq k, n
decq n
jz Lbignum_amontmul_montend
Lbignum_amontmul_montloop:
adcq (z,j,8), h
sbbq e, e
movq (m,j,8), a
mulq d
subq e, %rdx
addq h, a
movq a, -8(z,j,8)
movq %rdx, h
incq j
decq n
jnz Lbignum_amontmul_montloop
Lbignum_amontmul_montend:
adcq c0, h
adcq $0, c1
movq c1, c0
movq h, -8(z,j,8)

// End of outer loop.

incq i
cmpq k, i
jc Lbignum_amontmul_outerloop

// Now convert carry word, which is always in {0,1}, into a mask "d"
// and do a masked subtraction of m for the final almost-Montgomery result.

xorq d, d
subq c0, d
xorq e, e
xorq j, j
Lbignum_amontmul_corrloop:
movq (m,j,8), a
andq d, a
negq e
sbbq a, (z,j,8)
sbbq e, e
incq j
cmpq k, j
jc Lbignum_amontmul_corrloop

Lbignum_amontmul_end:
CFI_INC_RSP(8)
CFI_POP(%r15)
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbp)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_amontmul)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
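The montloop implements one word-level Montgomery step: pick d so that z + d*m is divisible by 2^64, add d*m, and shift down a word, which preserves the value modulo m. A C model of the combined step under the same assumptions (odd m, w the negated inverse from the sketch after bignum_amontredc, unsigned __int128 available):

    #include <stdint.h>

    // One reduction step: z := (z + d*m) / 2^64 with d = w * z[0] mod 2^64,
    // absorbing the running top carries c1:c0 into the vacated top word.
    static void montredc_step_model(uint64_t k, uint64_t *z, const uint64_t *m,
                                    uint64_t w, uint64_t *c0, uint64_t *c1) {
        uint64_t d = w * z[0];              // z[0] + d*m[0] == 0 (mod 2^64)
        unsigned __int128 t = (unsigned __int128)d * m[0] + z[0];
        uint64_t h = (uint64_t)(t >> 64);   // low word is zero by design
        for (uint64_t j = 1; j < k; j++) {
            t = (unsigned __int128)d * m[j] + z[j] + h;
            z[j - 1] = (uint64_t)t;         // offset writeback
            h = (uint64_t)(t >> 64);
        }
        unsigned __int128 top = (unsigned __int128)*c0 + h;
        z[k - 1] = (uint64_t)top;
        *c0 = *c1 + (uint64_t)(top >> 64);  // carry pipe for the next row
        *c1 = 0;                            // re-zeroed at the next row start
    }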
wlsfx/bnbb
2,417
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_divmod10.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Divide bignum by 10: z' := z div 10, returning remainder z mod 10
// Inputs z[k]; outputs function return (remainder) and z[k]
//
//    extern uint64_t bignum_divmod10(uint64_t k, uint64_t *z);
//
// Standard x86-64 ABI: RDI = k, RSI = z, returns RAX
// Microsoft x64 ABI: RCX = k, RDX = z, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_divmod10)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_divmod10)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_divmod10)
.text

#define k %rdi
#define z %rsi

#define d %rcx
#define l %rdx
#define r %rax

#define q %r8
#define h %r8
#define s %r9
#define w %r10

#define rshort %eax
#define wshort %r10d

S2N_BN_SYMBOL(bignum_divmod10):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif

// Initialize remainder to 0 and if k = 0 return

xorl rshort, rshort
testq k, k
jz Lbignum_divmod10_end

// Straightforward top-down loop doing 10 * q + r' := 2^64 * r + d

movq $0x3333333333333334, s
movl $0x3333333, wshort

Lbignum_divmod10_divloop:
movq -8(z,k,8), d

// First re-split and shift so 2^28 * h + l = (2^64 * r + d) / 2
// Then (2^64 * r + d) / 10 = [(2^28 - 1) / 5] * h + (h + l) / 5

movq d, l
shlq $35, l
shldq $35, d, r
shrq $36, l
movq r, h
addq l, r
mulq s
imulq w, h
addq l, q
movq q, -8(z,k,8)

// Generate the new remainder r = d - 10 * q
// Since r <= 9 we only need the low part computation ignoring carries

leaq (q,q,4), q
negq q
leaq (d,q,2), r
decq k
jnz Lbignum_divmod10_divloop

// Return %rax = r as the final remainder

Lbignum_divmod10_end:
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_divmod10)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
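The magic constants implement a multiply-by-reciprocal division; the contract itself is one 128-bit operation per word. A plain reference model (sketch, unsigned __int128 extension):

    #include <stdint.h>

    // Top-down schoolbook step: 10 * q + r' = 2^64 * r + d for each digit d.
    static uint64_t bignum_divmod10_model(uint64_t k, uint64_t *z) {
        uint64_t r = 0;
        while (k-- > 0) {
            unsigned __int128 t = ((unsigned __int128)r << 64) | z[k];
            z[k] = (uint64_t)(t / 10);
            r = (uint64_t)(t % 10);
        }
        return r;
    }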
wlsfx/bnbb
1,958
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_digit.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Select digit x[n]
// Inputs x[k], n; output function return
//
//    extern uint64_t bignum_digit(uint64_t k, const uint64_t *x, uint64_t n);
//
// n'th digit of a k-digit (digit=64 bits) bignum, in constant-time style.
// Indexing starts at 0, which is the least significant digit (little-endian).
// Returns zero if n >= k, i.e. we read a digit off the end of the bignum.
//
// Standard x86-64 ABI: RDI = k, RSI = x, RDX = n, returns RAX
// Microsoft x64 ABI: RCX = k, RDX = x, R8 = n, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_digit)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_digit)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_digit)
.text

#define k %rdi
#define x %rsi
#define n %rdx

#define d %rax
#define i %rcx
#define a %r8

S2N_BN_SYMBOL(bignum_digit):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
#endif

// Set the default digit to 0, and for length zero finish immediately

xorq d, d
testq k, k
jz Lbignum_digit_end

// Main loop: run over all the digits and take note of the n'th one

xorq i, i
Lbignum_digit_loop:
movq (x,i,8), a
cmpq n, i
cmovzq a, d
incq i
cmpq k, i
jc Lbignum_digit_loop

// Return

Lbignum_digit_end:
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_digit)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
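The point of the loop is that every digit is loaded regardless of n, with CMOV doing the data-dependent part. The same access pattern in C (a sketch; a compiler may or may not keep the mask branch-free, which is one reason the original is assembly):

    #include <stdint.h>

    static uint64_t bignum_digit_model(uint64_t k, const uint64_t *x, uint64_t n) {
        uint64_t d = 0;
        for (uint64_t i = 0; i < k; i++) {
            uint64_t mask = (uint64_t)0 - (i == n);  // all-ones only when i == n
            d |= x[i] & mask;                        // still touches every word
        }
        return d;                                    // 0 if n >= k, as specified
    }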
wlsfx/bnbb
2,668
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_gt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Compare bignums, x > y
// Inputs x[m], y[n]; output function return
//
//    extern uint64_t bignum_gt(uint64_t m, const uint64_t *x, uint64_t n,
//                              const uint64_t *y);
//
// Standard x86-64 ABI: RDI = m, RSI = x, RDX = n, RCX = y, returns RAX
// Microsoft x64 ABI: RCX = m, RDX = x, R8 = n, R9 = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_gt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_gt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_gt)
.text

#define m %rdi
#define x %rsi
#define n %rdx
#define y %rcx
#define i %r8
#define a %rax

#define ashort %eax

S2N_BN_SYMBOL(bignum_gt):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif

// Zero the main index counter for both branches

xorq i, i

// Speculatively form n := n - m and do case split

subq m, n
jc Lbignum_gt_ylonger

// The case where y is longer or of the same size (n >= m)

incq n
testq m, m
jz Lbignum_gt_xtest
Lbignum_gt_xmainloop:
movq (y,i,8), a
sbbq (x,i,8), a
incq i
decq m
jnz Lbignum_gt_xmainloop
jmp Lbignum_gt_xtest
Lbignum_gt_xtoploop:
movq (y,i,8), a
sbbq $0, a
incq i
Lbignum_gt_xtest:
decq n
jnz Lbignum_gt_xtoploop
sbbq a, a
negq a
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
ret

// The case where x is longer (m > n)

Lbignum_gt_ylonger:
addq m, n
subq n, m
testq n, n
jz Lbignum_gt_ytoploop
Lbignum_gt_ymainloop:
movq (y,i,8), a
sbbq (x,i,8), a
incq i
decq n
jnz Lbignum_gt_ymainloop
Lbignum_gt_ytoploop:
movl $0, ashort
sbbq (x,i,8), a
incq i
decq m
jnz Lbignum_gt_ytoploop
sbbq a, a
negq a
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_gt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
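Both branches compute the borrow out of y - x over max(m,n) words; x > y exactly when that subtraction borrows. A width-agnostic C model (sketch, unsigned __int128 for the borrow chain):

    #include <stdint.h>

    static uint64_t bignum_gt_model(uint64_t m, const uint64_t *x,
                                    uint64_t n, const uint64_t *y) {
        uint64_t len = (m > n) ? m : n, borrow = 0;
        for (uint64_t i = 0; i < len; i++) {
            uint64_t xi = (i < m) ? x[i] : 0;
            uint64_t yi = (i < n) ? y[i] : 0;
            unsigned __int128 t = (unsigned __int128)yi - xi - borrow;
            borrow = (uint64_t)(t >> 64) & 1;  // 1 iff this step wrapped
        }
        return borrow;                         // y - x borrows  <=>  x > y
    }

The bignum_le file that follows is the same scan ending in "incq a" rather than "negq a", returning 1 - borrow, i.e. x <= y.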
wlsfx/bnbb
2,669
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_le.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Compare bignums, x <= y
// Inputs x[m], y[n]; output function return
//
//    extern uint64_t bignum_le(uint64_t m, const uint64_t *x, uint64_t n,
//                              const uint64_t *y);
//
// Standard x86-64 ABI: RDI = m, RSI = x, RDX = n, RCX = y, returns RAX
// Microsoft x64 ABI: RCX = m, RDX = x, R8 = n, R9 = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_le)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_le)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_le)
.text

#define m %rdi
#define x %rsi
#define n %rdx
#define y %rcx
#define i %r8
#define a %rax

#define ashort %eax

S2N_BN_SYMBOL(bignum_le):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif

// Zero the main index counter for both branches

xorq i, i

// Speculatively form n := n - m and do case split

subq m, n
jc Lbignum_le_ylonger

// The case where y is longer or of the same size (n >= m)

incq n
testq m, m
jz Lbignum_le_xtest
Lbignum_le_xmainloop:
movq (y,i,8), a
sbbq (x,i,8), a
incq i
decq m
jnz Lbignum_le_xmainloop
jmp Lbignum_le_xtest
Lbignum_le_xtoploop:
movq (y,i,8), a
sbbq $0, a
incq i
Lbignum_le_xtest:
decq n
jnz Lbignum_le_xtoploop
sbbq a, a
incq a
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
ret

// The case where x is longer (m > n)

Lbignum_le_ylonger:
addq m, n
subq n, m
testq n, n
jz Lbignum_le_ytoploop
Lbignum_le_ymainloop:
movq (y,i,8), a
sbbq (x,i,8), a
incq i
decq n
jnz Lbignum_le_ymainloop
Lbignum_le_ytoploop:
movl $0, ashort
sbbq (x,i,8), a
incq i
decq m
jnz Lbignum_le_ytoploop
sbbq a, a
incq a
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_le)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
2,467
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_eq.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Test bignums for equality, x = y
// Inputs x[m], y[n]; output function return
//
//    extern uint64_t bignum_eq(uint64_t m, const uint64_t *x, uint64_t n,
//                              const uint64_t *y);
//
// Standard x86-64 ABI: RDI = m, RSI = x, RDX = n, RCX = y, returns RAX
// Microsoft x64 ABI: RCX = m, RDX = x, R8 = n, R9 = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_eq)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_eq)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_eq)
.text

#define m %rdi
#define x %rsi
#define n %rdx
#define y %rcx
#define c %rax
// We can re-use n for this, not needed when d appears
#define d %rdx

S2N_BN_SYMBOL(bignum_eq):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif

// Initialize the accumulated OR of differences to zero

xorq c, c

// If m >= n jump into the m > n loop at the final equality test
// This will drop through for m = n

cmpq n, m
jnc Lbignum_eq_mtest

// Toploop for the case n > m

Lbignum_eq_nloop:
decq n
orq (y,n,8), c
cmpq n, m
jnz Lbignum_eq_nloop
jmp Lbignum_eq_mmain

// Toploop for the case m > n (or n = m which enters at "mtest")

Lbignum_eq_mloop:
decq m
orq (x,m,8), c
cmpq n, m
Lbignum_eq_mtest:
jnz Lbignum_eq_mloop

// Combined main loop for the min(m,n) lower words

Lbignum_eq_mmain:
testq m, m
jz Lbignum_eq_end
Lbignum_eq_loop:
movq -8(x,m,8), d
xorq -8(y,m,8), d
orq d, c
decq m
jnz Lbignum_eq_loop

// Set a standard C condition based on whether c is nonzero

Lbignum_eq_end:
negq c
sbbq c, c
incq c
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_eq)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
1,841
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_digitsize.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Return size of bignum in digits (64-bit word)
// Input x[k]; output function return
//
//    extern uint64_t bignum_digitsize(uint64_t k, const uint64_t *x);
//
// In the case of a zero bignum as input the result is 0
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI: RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_digitsize)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_digitsize)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_digitsize)
.text

#define k %rdi
#define x %rsi

#define i %rax
#define a %rcx
#define j %rdx

S2N_BN_SYMBOL(bignum_digitsize):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif

// Initialize the index i and also prepare default return value of 0 (i = %rax)

xorq i, i

// If the bignum is zero-length, just return 0

testq k, k
jz Lbignum_digitsize_end

// Run over the words j = 0..i-1, and set i := j + 1 when hitting nonzero a[j]

xorq j, j
Lbignum_digitsize_loop:
movq (x,j,8), a
incq j
testq a, a
cmovnzq j, i
cmpq k, j
jnz Lbignum_digitsize_loop

Lbignum_digitsize_end:
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_digitsize)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
2,436
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_optsub.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Optionally subtract, z := x - y (if p nonzero) or z := x (if p zero)
// Inputs x[k], p, y[k]; outputs function return (carry-out) and z[k]
//
//    extern uint64_t bignum_optsub(uint64_t k, uint64_t *z, const uint64_t *x,
//                                  uint64_t p, const uint64_t *y);
//
// It is assumed that all numbers x, y and z have the same size k digits.
// Returns carry-out as per usual subtraction, always 0 if p was zero.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x, RCX = p, R8 = y, returns RAX
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = x, R9 = p, [RSP+40] = y, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optsub)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optsub)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optsub)
.text

#define k %rdi
#define z %rsi
#define x %rdx
#define p %rcx
#define y %r8

#define i %r9
#define b %r10
#define c %rax
#define a %r11

S2N_BN_SYMBOL(bignum_optsub):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
movq 56(%rsp), %r8
#endif

// Initialize top carry to zero in all cases (also return value)

xorq c, c

// If k = 0 do nothing

testq k, k
jz Lbignum_optsub_end

// Convert the nonzero/zero status of p into an all-1s or all-0s mask

negq p
sbbq p, p

// Now go round the loop for i=0...k-1, saving the carry in c each iteration

xorq i, i
Lbignum_optsub_loop:
movq (x,i,8), a
movq (y,i,8), b
andq p, b
negq c
sbbq b, a
sbbq c, c
movq a, (z,i,8)
incq i
cmpq k, i
jc Lbignum_optsub_loop

// Return top carry

negq %rax

Lbignum_optsub_end:
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_optsub)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
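The NEG/SBB pair turns an arbitrary nonzero p into an all-ones mask (and zero into zero), after which the subtraction runs unconditionally, which keeps the timing independent of p. A C model of the whole function (sketch, unsigned __int128 for the borrow chain):

    #include <stdint.h>

    static uint64_t bignum_optsub_model(uint64_t k, uint64_t *z, const uint64_t *x,
                                        uint64_t p, const uint64_t *y) {
        uint64_t mask = (uint64_t)0 - (p != 0);    // negq p; sbbq p, p
        uint64_t borrow = 0;
        for (uint64_t i = 0; i < k; i++) {
            unsigned __int128 t =
                (unsigned __int128)x[i] - (y[i] & mask) - borrow;
            z[i] = (uint64_t)t;
            borrow = (uint64_t)(t >> 64) & 1;
        }
        return borrow;                             // always 0 when p == 0
    }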
wlsfx/bnbb
1,532
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_iszero.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Test bignum for zero-ness, x = 0
// Input x[k]; output function return
//
//    extern uint64_t bignum_iszero(uint64_t k, const uint64_t *x);
//
// Standard x86-64 ABI: RDI = k, RSI = x, returns RAX
// Microsoft x64 ABI: RCX = k, RDX = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_iszero)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_iszero)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_iszero)
.text

#define a %rax
#define k %rdi
#define x %rsi

S2N_BN_SYMBOL(bignum_iszero):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
#endif

xorq a, a
testq k, k
jz Lbignum_iszero_end

Lbignum_iszero_loop:
orq -8(x,k,8), a
decq k
jnz Lbignum_iszero_loop

// Set a standard C condition based on whether a is nonzero

negq a
sbbq a, a

Lbignum_iszero_end:
incq a
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_iszero)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
2,022
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_copy.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Copy bignum with zero-extension or truncation, z := x
// Input x[n]; output z[k]
//
//    extern void bignum_copy(uint64_t k, uint64_t *z, uint64_t n, const uint64_t *x);
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = n, R9 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_copy)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_copy)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_copy)
.text

#define k %rdi
#define z %rsi
#define n %rdx
#define x %rcx

#define i %r8
#define a %rax

S2N_BN_SYMBOL(bignum_copy):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif

// Replace RDX = n with RDX = min(k,n) so we are definitely safe copying those
// Initialize the element counter to 0

cmpq n, k
cmovcq k, n
xorq i, i

// If min(k,n) = 0 jump to the padding stage

testq n, n
jz Lbignum_copy_padding

Lbignum_copy_copyloop:
movq (x,i,8), a
movq a, (z,i,8)
incq i
cmpq n, i
jc Lbignum_copy_copyloop

Lbignum_copy_padding:
cmpq k, i
jnc Lbignum_copy_end
xorq a, a

Lbignum_copy_padloop:
movq a, (z,i,8)
incq i
cmpq k, i
jc Lbignum_copy_padloop

Lbignum_copy_end:
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_copy)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
15,488
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_montifier.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Compute "montification" constant z := 2^{128k} mod m
// Input m[k]; output z[k]; temporary buffer t[>=k]
//
//    extern void bignum_montifier(uint64_t k, uint64_t *z, const uint64_t *m,
//                                 uint64_t *t);
//
// The last argument points to a temporary buffer t that should have size >= k.
// This is called "montifier" because given any other k-digit number x,
// whether or not it's reduced modulo m, it can be mapped to its Montgomery
// representation (2^{64k} * x) mod m just by Montgomery multiplication by z.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = m, RCX = t
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = m, R9 = t
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montifier)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montifier)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montifier)
.text

#define k %rdi
#define z %rsi

// These two inputs get moved to different places since RCX and RDX are special

#define m %r12
#define t %r13

// Other variables

#define i %rbx
// Modular inverse; aliased to i, but we never use them together
#define w %rbx
#define j %rbp
// Matters that this is RAX for special use in multiplies
#define a %rax
// Matters that this is RDX for special use in multiplies
#define d %rdx
// Matters that this is RCX as CL=lo(c) is assumed in shifts
#define c %rcx
#define h %r11
#define l %r10
#define b %r9
#define n %r8

// Some aliases for the values b and n

#define q %r8
#define r %r9

#define ashort %eax
#define ishort %ebx
#define jshort %ebp
#define qshort %r8d

S2N_BN_SYMBOL(bignum_montifier):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif

// Save some additional registers for use, copy args out of RCX and RDX

CFI_PUSH(%rbp)
CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)

movq %rdx, m
movq %rcx, t

// If k = 0 the whole operation is trivial

testq k, k
jz Lbignum_montifier_end

// Copy the input m into the temporary buffer t. The temporary register
// c matters since we want it to hold the highest digit, ready for the
// normalization phase.

xorq i, i
Lbignum_montifier_copyinloop:
movq (m,i,8), c
movq c, (t,i,8)
incq i
cmpq k, i
jc Lbignum_montifier_copyinloop

// Do a rather stupid but constant-time digit normalization, conditionally
// shifting left (k-1) times based on whether the top word is zero.
// With careful binary striding this could be O(k*log(k)) instead of O(k^2)
// while still retaining the constant-time style.
// The "neg c" sets the zeroness predicate (~CF) for the entire inner loop

movq k, i
decq i
jz Lbignum_montifier_normalized
Lbignum_montifier_normloop:
xorq j, j
movq k, h
negq c
movl $0, ashort
Lbignum_montifier_shufloop:
movq a, c
movq (t,j,8), a
cmovcq a, c
movq c, (t,j,8)
incq j
decq h
jnz Lbignum_montifier_shufloop
decq i
jnz Lbignum_montifier_normloop

// We now have the top digit nonzero, assuming the input was nonzero,
// and as per the invariant of the loop above, c holds that digit. So
// now just count c's leading zeros and shift t bitwise that many bits.
// Note that we don't care about the result of bsr for zero inputs so
// the simple xor-ing with 63 is safe.

Lbignum_montifier_normalized:

bsrq c, c
xorq $63, c

xorq b, b
xorq i, i
Lbignum_montifier_bitloop:
movq (t,i,8), a
movq a, j
shldq %cl, b, a
movq a, (t,i,8)
movq j, b
incq i
cmpq k, i
jc Lbignum_montifier_bitloop

// Let h be the high word of n, which in all the in-scope cases is >= 2^63.
// Now successively form q = 2^i div h and r = 2^i mod h as i goes from
// 64 to 126. We avoid just using division out of constant-time concerns
// (at the least we would need to fix up h = 0 for out-of-scope inputs) and
// don't bother with Newton-Raphson, since this stupid simple loop doesn't
// contribute much of the overall runtime at typical sizes.

movq -8(t,k,8), h
movl $1, qshort
movq h, r
negq r
movl $62, ishort
Lbignum_montifier_estloop:
addq q, q
movq h, a
subq r, a
cmpq a, r // CF <=> r < h - r <=> 2 * r < h
sbbq a, a
notq a // a = bitmask(2 * r >= h)
subq a, q
addq r, r
andq h, a
subq a, r
decq i
jnz Lbignum_montifier_estloop

// Strictly speaking the above loop doesn't quite give the true remainder
// and quotient in the special case r = h = 2^63, so fix it up. We get
// q = 2^63 - 1 and r = 2^63 and really want q = 2^63 and r = 0. This is
// supererogatory, because the main property of q used below still holds
// in this case unless the initial m = 1, and then anyway the overall
// specification (congruence modulo m) holds degenerately. But it seems
// nicer to get a "true" quotient and remainder.

incq r
cmpq r, h
adcq $0, q

// So now we have q and r with 2^126 = q * h + r (imagining r = 0 in the
// fixed-up case above: note that we never actually use the computed
// value of r below and so didn't adjust it). And we can assume the ranges
// q <= 2^63 and r < h < 2^64.
//
// The idea is to use q as a first quotient estimate for a remainder
// of 2^{p+62} mod n, where p = 64 * k. We have, splitting n into the
// high and low parts h and l:
//
// 2^{p+62} - q * n = 2^{p+62} - q * (2^{p-64} * h + l)
//                  = 2^{p+62} - (2^{p-64} * (q * h) + q * l)
//                  = 2^{p+62} - 2^{p-64} * (2^126 - r) - q * l
//                  = 2^{p-64} * r - q * l
//
// Note that 2^{p-64} * r < 2^{p-64} * h <= n
// and also  q * l < 2^63 * 2^{p-64} = 2^{p-1} <= n
// so |diff| = |2^{p-64} * r - q * l| < n.
//
// If in fact diff >= 0 then it is already 2^{p+62} mod n.
// otherwise diff + n is the right answer.
//
// To (maybe?) make the computation slightly easier we actually flip
// the sign and compute d = q * n - 2^{p+62}. Then the answer is either
// -d (when negative) or n - d; in either case we effectively negate d.
// This negating tweak in fact spoils the result for cases where
// 2^{p+62} mod n = 0, when we get n instead. However the only case
// where this can happen is m = 1, when the whole spec holds trivially,
// and actually the remainder of the logic below works anyway since
// the latter part of the code only needs a congruence for the k-digit
// result, not strict modular reduction (the doublings will maintain
// the non-strict inequality).

xorq c, c
xorq i, i
Lbignum_montifier_mulloop:
movq (t,i,8), a
mulq q
addq c, a
adcq $0, d
movq a, (z,i,8)
movq d, c
incq i
cmpq k, i
jc Lbignum_montifier_mulloop

// Now c is the high word of the product, so subtract 2^62
// and then turn it into a bitmask in q = h

movq $0x4000000000000000, a
subq a, c
sbbq q, q
notq q

// Now do [c] * n - d for our final answer

xorq c, c
xorq i, i
Lbignum_montifier_remloop:
movq (t,i,8), a
andq q, a
negq c
sbbq (z,i,8), a
sbbq c, c
movq a, (z,i,8)
incq i
cmpq k, i
jc Lbignum_montifier_remloop

// Now still need to do a couple of modular doublings to get us all the
// way up to 2^{p+64} == r from initial 2^{p+62} == r (mod n).

xorq c, c
xorq j, j
xorq b, b
Lbignum_montifier_dubloop1:
movq (z,j,8), a
shrdq $63, a, c
negq b
sbbq (t,j,8), c
sbbq b, b
movq c, (z,j,8)
movq a, c
incq j
cmpq k, j
jc Lbignum_montifier_dubloop1
shrq $63, c
addq b, c
xorq j, j
xorq b, b
Lbignum_montifier_corrloop1:
movq (t,j,8), a
andq c, a
negq b
adcq (z,j,8), a
sbbq b, b
movq a, (z,j,8)
incq j
cmpq k, j
jc Lbignum_montifier_corrloop1

// This is not exactly the same: we also copy output to t giving the
// initialization t_1 = r == 2^{p+64} mod n for the main loop next.

xorq c, c
xorq j, j
xorq b, b
Lbignum_montifier_dubloop2:
movq (z,j,8), a
shrdq $63, a, c
negq b
sbbq (t,j,8), c
sbbq b, b
movq c, (z,j,8)
movq a, c
incq j
cmpq k, j
jc Lbignum_montifier_dubloop2
shrq $63, c
addq b, c
xorq j, j
xorq b, b
Lbignum_montifier_corrloop2:
movq (t,j,8), a
andq c, a
negq b
adcq (z,j,8), a
sbbq b, b
movq a, (z,j,8)
movq a, (t,j,8)
incq j
cmpq k, j
jc Lbignum_montifier_corrloop2

// We then successively generate (k+1)-digit values satisfying
// t_i == 2^{p+64*i} mod n, each of which is stored in h::t. Finish
// initialization by zeroing h initially

xorq h, h

// Then if t_i = 2^{p} * h + l
// we have t_{i+1} == 2^64 * t_i
//               = (2^{p+64} * h) + (2^64 * l)
//              == r * h + l<<64
// Do this 2*k more times so we end up == 2^{192*k+64}, one more than we want
//
// Writing B = 2^{64k}, the possible correction of adding r, which for
// a (k+1)-digit result is equivalent to subtracting q = 2^{64*(k+1)} - r
// would give the overall worst-case value minus q of
// [ B * (B^k - 1) + (B - 1) * r ] - [B^{k+1} - r]
// = B * (r - 1) < B^{k+1} so we keep inside k+1 digits as required.
//
// This implementation makes the shift implicit by starting b with the
// "previous" digit (initially 0) to offset things by 1.

leaq (k,k), i
Lbignum_montifier_modloop:
xorq b, b
movq k, n
xorq j, j
xorq c, c
Lbignum_montifier_cmaloop:
adcq b, c
sbbq l, l
movq (z,j,8), a
mulq h
subq l, d
addq c, a
movq (t,j,8), b
movq a, (t,j,8)
movq d, c
incq j
decq n
jnz Lbignum_montifier_cmaloop
adcq c, b
movq b, h
sbbq l, l
xorq j, j
xorq c, c
Lbignum_montifier_oaloop:
movq (t,j,8), a
movq (z,j,8), b
andq l, b
negq c
adcq b, a
sbbq c, c
movq a, (t,j,8)
incq j
cmpq k, j
jc Lbignum_montifier_oaloop
subq c, h
decq i
jnz Lbignum_montifier_modloop

// Compute the negated modular inverse w (same register as i, not used again).

movq (m), a
movq a, c
movq a, w
shlq $2, c
subq c, w
xorq $2, w
movq w, c
imulq a, c
movl $2, ashort
addq c, a
addq $1, c
imulq a, w
imulq c, c
movl $1, ashort
addq c, a
imulq a, w
imulq c, c
movl $1, ashort
addq c, a
imulq a, w
imulq c, c
movl $1, ashort
addq c, a
imulq a, w

// Now do one almost-Montgomery reduction w.r.t. the original m
// which lops off one 2^64 from the congruence and, with the usual
// almost-Montgomery correction, gets us back inside k digits

movq (t), c
movq w, b
imulq c, b
movq (m), a
mulq b
addq c, a
movq d, c
movl $1, jshort
movq k, n
decq n
jz Lbignum_montifier_amontend
Lbignum_montifier_amontloop:
adcq (t,j,8), c
sbbq l, l
movq (m,j,8), a
mulq b
subq l, d
addq c, a
movq a, -8(t,j,8)
movq d, c
incq j
decq n
jnz Lbignum_montifier_amontloop
Lbignum_montifier_amontend:
adcq c, h
sbbq l, l
movq h, -8(t,k,8)

xorq j, j
xorq c, c
Lbignum_montifier_aosloop:
movq (t,j,8), a
movq (m,j,8), b
andq l, b
negq c
sbbq b, a
sbbq c, c
movq a, (z,j,8)
incq j
cmpq k, j
jc Lbignum_montifier_aosloop

// So far, the code (basically a variant of bignum_amontifier) has produced
// a k-digit value z == 2^{192k} (mod m), not necessarily fully reduced mod m.
// We now do a short Montgomery reduction (similar to bignum_demont) so that
// we achieve full reduction mod m while lopping 2^{64k} off the congruence.
// We recycle h as the somewhat strangely-named outer loop counter.

movq k, h

Lbignum_montifier_montouterloop:
movq (z), c
movq w, b
imulq c, b
movq (m), a
mulq b
addq c, a
movq d, c
movl $1, jshort
movq k, n
decq n
jz Lbignum_montifier_montend
Lbignum_montifier_montloop:
adcq (z,j,8), c
sbbq l, l
movq (m,j,8), a
mulq b
subq l, d
addq c, a
movq a, -8(z,j,8)
movq d, c
incq j
decq n
jnz Lbignum_montifier_montloop
Lbignum_montifier_montend:
adcq $0, c
movq c, -8(z,k,8)
decq h
jnz Lbignum_montifier_montouterloop

// Now do a comparison of z with m to set a final correction mask
// indicating that z >= m and so we need to subtract m.

xorq j, j
movq k, n
Lbignum_montifier_cmploop:
movq (z,j,8), a
sbbq (m,j,8), a
incq j
decq n
jnz Lbignum_montifier_cmploop

sbbq d, d
notq d

// Now do a masked subtraction of m for the final reduced result.

xorq l, l
xorq j, j
Lbignum_montifier_corrloop:
movq (m,j,8), a
andq d, a
negq l
sbbq a, (z,j,8)
sbbq l, l
incq j
cmpq k, j
jc Lbignum_montifier_corrloop

Lbignum_montifier_end:
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
CFI_POP(%rbp)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montifier)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
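The constant-time estimation loop near the start maintains the invariant 2^i = q*h + r with 0 <= r < h as i climbs from 64 to 126, replacing the obvious branch with SBB masks. A branching C sketch of the same invariant (the real code is branch-free, and the h = 2^63 corner case is fixed up afterwards exactly as the comments describe):

    #include <stdint.h>

    // Assumes 2^63 < h < 2^64 (top digit of the normalized modulus).
    static void estimate_q_r(uint64_t h, uint64_t *qp, uint64_t *rp) {
        uint64_t q = 1, r = (uint64_t)0 - h;   // 2^64 = 1*h + (2^64 - h)
        for (int i = 0; i < 62; i++) {         // take 2^64 up to 2^126
            q <<= 1;
            if (r >= h - r) { q += 1; r -= h - r; }  // 2r >= h: shed one h
            else            { r += r; }              // 2r < h: plain doubling
        }
        *qp = q; *rp = r;                      // 2^126 = q*h + r with r < h
    }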
wlsfx/bnbb
5,303
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_cdiv_exact.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Divide by a single word, z := x / m *when known to be exact*
// Inputs x[n], m; output z[k]
//
//    extern void bignum_cdiv_exact(uint64_t k, uint64_t *z, uint64_t n,
//                                  const uint64_t *x, uint64_t m);
//
// Does the "z := x / m" operation where x is n digits and result z is k,
// *assuming* that m is nonzero and that the input x is in fact an
// exact multiple of m. (If this isn't known, use the general bignum_cdiv
// function instead.) In general the result is truncated to k digits.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x, R8 = m
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = n, R9 = x, [RSP+40] = m
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cdiv_exact)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cdiv_exact)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cdiv_exact)
.text

#define k %rdi
#define z %rsi
#define m %r8

// These parameters get moved because of special uses for %rcx, %rdx

#define n %r9
#define x %r10

// This needs to be in %rcx for variable shifts with %cl

#define e %rcx

// Other variables

#define w %r11
#define d %r12
#define i %rbx
#define c %r13
#define t %r14

#define a %rax
#define h %rdx

#define ishort %ebx
#define hshort %edx

S2N_BN_SYMBOL(bignum_cdiv_exact):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
movq 56(%rsp), %r8
#endif

CFI_PUSH(%rbx)
CFI_PUSH(%r12)
CFI_PUSH(%r13)
CFI_PUSH(%r14)

// If k = 0 then there's nothing to be done

testq k, k
jz Lbignum_cdiv_exact_end

// Move parameters that need a new home

movq %rdx, n
movq %rcx, x

// Let e be the number of trailing zeros in m (we can ignore m = 0)

bsfq m, e

// Now just shift m right by e bits. So hereafter we can assume m is odd
// but we first need to shift the input right by e bits then divide by m.

shrq %cl, m

// Compute the negated modular inverse w with w * m + 1 == 0 (mod 2^64)
// This is essentially the same as word_negmodinv.

movq m, a
movq m, w
shlq $2, a
subq a, w
xorq $2, w
movq w, a
imulq m, a
movl $2, hshort
addq a, h
addq $1, a
imulq h, w
imulq a, a
movl $1, hshort
addq a, h
imulq h, w
imulq a, a
movl $1, hshort
addq a, h
imulq h, w
imulq a, a
movl $1, hshort
addq a, h
imulq h, w

// Consider x' = x + m and do a Montgomery reduction, keeping the cofactor z.
// This gives us x' + m * z = 2^{64k} * c where c <= m. Assuming x = m * y
// we then have m * y + m + m * z = 2^{64k} * c, i.e.
//
//     m * (y + z + 1) = 2^{64k} * c
//
// This means m * (y + z + 1) == 0 (mod 2^{64k}), even when we truncate
// x to k digits (if in fact k < n). Since m is odd, it's coprime to
// 2^{64k} so we can cancel and get y + z + 1 == 0 (mod 2^{64k}), and
// hence using logical complement y == ~z (mod 2^{64k}). Thus we can
// write back the logical complements of the cofactor as the answer.
// Start with carry word c = m to make the initial tweak x' = x + m.

movq m, c
xorl ishort, ishort

// Unless n = 0, preload the zeroth digit and bump up the x pointer by
// 8 and n down by 1, to ease indexing and comparison using the same
// variable i in the main loop. When n = 0 we leave it alone, as the
// comparison i < n will always fail and the x pointer is unused.

xorq d, d
testq n, n
jz Lbignum_cdiv_exact_loop
movq (x), d
addq $8, x
decq n

Lbignum_cdiv_exact_loop:

// Load the next digit up to get [t,d] then shift right e places

xorq t, t
cmpq n, i
jnc Lbignum_cdiv_exact_noload
movq (x,i,8), t
Lbignum_cdiv_exact_noload:
shrdq %cl, t, d
addq c, d
sbbq c, c
negq c

// Now the effective sum is [c,a] where the carry-in has been absorbed.
// Do the main Montgomery step with the (odd) m, writing back ~q. Finally
// set d to the next digit ready for the following iteration.

movq w, a
imulq d, a
notq a
movq a, (z,i,8)
notq a

mulq m
addq d, a
adcq h, c

movq t, d
incq i
cmpq k, i
jc Lbignum_cdiv_exact_loop

Lbignum_cdiv_exact_end:
CFI_POP(%r14)
CFI_POP(%r13)
CFI_POP(%r12)
CFI_POP(%rbx)
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cdiv_exact)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
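The trick of writing back ~q while Montgomery-reducing x + m is equivalent to the textbook word-by-word exact division by an odd divisor. A standalone C model of that textbook variant (a sketch: it handles only the odd-m core, whereas the file first strips m's trailing zeros and shifts x accordingly; the 4-step Newton inverse here is the positive counterpart of the negmodinv above):

    #include <stdint.h>

    // Assumes m odd and x an exact multiple of m; quotient truncated to k words.
    static void cdiv_exact_odd_model(uint64_t k, uint64_t *z, uint64_t n,
                                     const uint64_t *x, uint64_t m) {
        uint64_t inv = (3 * m) ^ 2;            // inverse of m mod 2^5
        for (int s = 0; s < 4; s++)
            inv *= 2 - m * inv;                // Newton: doubles the precision
        uint64_t hi = 0, borrow = 0;
        for (uint64_t i = 0; i < k; i++) {
            uint64_t xi = (i < n) ? x[i] : 0;
            unsigned __int128 cur = (unsigned __int128)xi - hi - borrow;
            uint64_t q = (uint64_t)cur * inv;  // q*m matches the low word
            z[i] = q;
            borrow = (uint64_t)(cur >> 64) & 1;
            hi = (uint64_t)(((unsigned __int128)q * m) >> 64);
        }
    }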
wlsfx/bnbb
2,325
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_optneg.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Optionally negate, z := -x (if p nonzero) or z := x (if p zero)
// Inputs p, x[k]; outputs function return (nonzero input) and z[k]
//
//    extern uint64_t bignum_optneg(uint64_t k, uint64_t *z, uint64_t p,
//                                  const uint64_t *x);
//
// It is assumed that both numbers x and z have the same size k digits.
// Returns a carry, which is equivalent to "x is nonzero".
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = p, RCX = x, returns RAX
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = p, R9 = x, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg)
.text

#define k %rdi
#define z %rsi
#define p %rdx
#define x %rcx

#define c %rax
#define a %r8
#define i %r9

#define cshort %eax

S2N_BN_SYMBOL(bignum_optneg):
CFI_START
_CET_ENDBR

#if WINDOWS_ABI
CFI_PUSH(%rdi)
CFI_PUSH(%rsi)
movq %rcx, %rdi
movq %rdx, %rsi
movq %r8, %rdx
movq %r9, %rcx
#endif

// If k = 0 do nothing, but need to set zero return for the carry (c = %rax)

xorq c, c
testq k, k
jz Lbignum_optneg_end

// Convert p into a strict bitmask and set initial carry-in in c

negq p
sbbq p, p
subq p, c

// Main loop

xorq i, i
Lbignum_optneg_loop:
movq (x,i,8), a
xorq p, a
addq c, a
movl $0, cshort
movq a, (z,i,8)
adcq $0, c
incq i
cmpq k, i
jc Lbignum_optneg_loop

// Return carry flag, fixing up inversion for negative case

xorq p, %rax
andq $1, %rax

Lbignum_optneg_end:
#if WINDOWS_ABI
CFI_POP(%rsi)
CFI_POP(%rdi)
#endif
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_optneg)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
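Conditional negation is XOR with the mask plus a carry-in of 1, i.e. the usual ~x + 1, applied uniformly. A C model including the carry fix-up on return (sketch, unsigned __int128 assumed):

    #include <stdint.h>

    static uint64_t bignum_optneg_model(uint64_t k, uint64_t *z, uint64_t p,
                                        const uint64_t *x) {
        uint64_t mask = (uint64_t)0 - (p != 0);    // negq p; sbbq p, p
        uint64_t c = mask & 1;                     // carry-in 1 iff negating
        for (uint64_t i = 0; i < k; i++) {
            unsigned __int128 t = (unsigned __int128)(x[i] ^ mask) + c;
            z[i] = (uint64_t)t;                    // conditional ~x[i] + carry
            c = (uint64_t)(t >> 64);
        }
        return (c ^ mask) & 1;                     // 1 iff negating nonzero x
    }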
wlsfx/bnbb
2,058
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_of_word.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert single digit to bignum, z := n
// Input n; output z[k]
//
// extern void bignum_of_word(uint64_t k, uint64_t *z, uint64_t n);
//
// Create a k-digit (digit=64 bits) bignum at z with value n (mod 2^{64k})
// where n is a word. The "mod 2^{64k}" only matters in the degenerate k = 0
// case.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = n
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = n
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_of_word)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_of_word)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_of_word)
        .text

S2N_BN_SYMBOL(bignum_of_word):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

// If k = 0 do nothing

        testq   %rdi, %rdi
        jz      Lbignum_of_word_end

Lbignum_of_word_nontrivial:

// Write lowest word and jump to end if k = 1

        movq    %rdx, (%rsi)
        decq    %rdi
        jz      Lbignum_of_word_end

// Zero %rdx and write it to all z[i] for i = k-1 down to 1
// It's a bit more compact to iterate "high to low" like this.
// But at the cost of bumping up %rsi by leaq 8(%rsi), %rsi
// each time round the loop (which also modifies one more reg)
// we could go "low to high" if it helps with prefetch etc.

        xorq    %rdx, %rdx
Lbignum_of_word_loop:
        movq    %rdx, (%rsi,%rdi,8)
        decq    %rdi
        jnz     Lbignum_of_word_loop

Lbignum_of_word_end:
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_of_word)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
2,299
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_mux16.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Select element from 16-element table, z := xs[k*i] // Inputs xs[16*k], i; output z[k] // // extern void bignum_mux16(uint64_t k, uint64_t *z, const uint64_t *xs, // uint64_t i); // // It is assumed that all numbers xs[16] and the target z have the same size k // The pointer xs is to a contiguous array of size 16, elements size-k bignums // // Standard x86-64 ABI: RDI = k, RSI = z, RDX = xs, RCX = i // Microsoft x64 ABI: RCX = k, RDX = z, R8 = xs, R9 = i // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mux16) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mux16) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mux16) .text #define k %rdi #define z %rsi // These get moved from original registers #define x %rcx #define i %rax // Other registers #define a %rdx #define b %r8 #define j %r9 #define n %r10 S2N_BN_SYMBOL(bignum_mux16): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx #endif // Copy size into decrementable counter, or skip everything if k = 0 testq k, k jz Lbignum_mux16_end // If length = 0 do nothing movq k, n // Multiply i by k so we can compare pointer offsets directly with it movq %rcx, %rax movq %rdx, %rcx mulq k // Main loop Lbignum_mux16_loop: movq (x), a movq k, j .rep 15 movq (x,j,8), b cmpq i, j cmoveq b, a addq k, j .endr movq a, (z) addq $8, z addq $8, x decq n jnz Lbignum_mux16_loop Lbignum_mux16_end: #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mux16) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
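
// For reference, the same selection as a minimal C sketch (bignum_mux16_ref
// is an illustrative name, not part of s2n-bignum; assumes <stdint.h>; a
// real implementation must ensure the compiler keeps the digit selection
// branch-free, which the cmov sequence above guarantees directly):
//
//   void bignum_mux16_ref(uint64_t k, uint64_t *z, const uint64_t *xs,
//                         uint64_t i)
//   {
//       for (uint64_t j = 0; j < k; j++) {      // one digit position at a time
//           uint64_t a = xs[j];                 // running choice, element 0
//           for (uint64_t e = 1; e < 16; e++) { // scan the other 15 elements
//               uint64_t mask = (uint64_t)0 - (uint64_t)(e == i);
//               a = (a & ~mask) | (xs[e * k + j] & mask);
//           }
//           z[j] = a;
//       }
//   }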
wlsfx/bnbb
2,059
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_pow2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Return bignum of power of 2, z := 2^n // Input n; output z[k] // // extern void bignum_pow2(uint64_t k, uint64_t *z, uint64_t n); // // The result is as usual mod 2^{64*k}, so will be zero if n >= 64*k. // // Standard x86-64 ABI: RDI = k, RSI = z, RDX = n // Microsoft x64 ABI: RCX = k, RDX = z, R8 = n // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_pow2) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_pow2) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_pow2) .text #define k %rdi #define z %rsi #define n %rdx #define i %rcx #define w %rax #define a %r8 #define wshort %eax S2N_BN_SYMBOL(bignum_pow2): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // If k = 0 do nothing testq k, k jz Lbignum_pow2_end // Create the index n at which to write the nonzero word and the word w itself // Note that the x86 manual explicitly says that shift counts are taken modulo // the datasize, so we don't need to mask the lower 6 bits of n ourselves. movl $1, wshort movq n, %rcx shlq %cl, w shrq $6, n // Now in a constant-time fashion set the n'th word to w and others to zero xorq i, i Lbignum_pow2_loop: xorq a, a cmpq n, i cmovzq w, a movq a, (z,i,8) incq i cmpq k, i jc Lbignum_pow2_loop Lbignum_pow2_end: #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_pow2) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
9,915
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_divstep59.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Perform 59 "divstep" iterations and return signed matrix of updates // Inputs d, f, g; output m[2][2] and function return // // extern int64_t word_divstep59 // (int64_t m[2][2],int64_t d,uint64_t f,uint64_t g); // // Standard x86-64 ABI: RDI = m, RSI = d, RDX = f, RCX = g, returns RAX // Microsoft x64 ABI: RCX = m, RDX = d, R8 = f, R9 = g, returns RAX // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_divstep59) S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_divstep59) S2N_BN_SYM_PRIVACY_DIRECTIVE(word_divstep59) .text #define mat %rdi #define d %rsi #define fuv %rbx #define grs %rcx #define f %r12 #define g %r13 #define m %r8 #define t %r9 #define zero %rbp #define zeroe %ebp #define minus2 %rax #define minus2e %eax #define plus2 %rdx #define plus2e %edx #define m00 %r8 #define m01 %r9 #define m10 %r10 #define m11 %r11 S2N_BN_SYMBOL(word_divstep59): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx #endif // Save extra registers CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) // Pack f and g into single registers with (negated) update matrices, // initially the identity matrix. The f_lo and g_lo are initially // the 20 lowest bits of f and g. // // fuv = f_lo - 2^41 * 1 - 2^62 * 0 // grs = g_lo - 2^41 * 0 - 2^62 * 1 movq %rdx, fuv movq %rdx, f andq $0xFFFFF, fuv movq $0xFFFFFE0000000000, %rax orq %rax, fuv movq %rcx, g andq $0xFFFFF, grs movq $0xc000000000000000, %rax orq %rax, grs // Now do 20 divsteps on that packed format. // // At the i'th iteration (starting at i = 0, ending at i = 20) // the intermediate packed values are of the form // // fuv = f_lo - 2^{41-i} * m00 - 2^{62-i} * m01 // grs = g_lo - 2^{41-i} * m10 - 2^{62-i} * m11 // // where the following matrix indicates the updates to apply // to the original (full-sized) f and g for those iterations. // // [m00 m01] * [f_0] = [f_i] // [m10 m11] [g_0] [g_i] movq $-2, minus2 xorl zeroe, zeroe movl $2, plus2e movq fuv, t movq minus2, m testq d, d cmovs zero, m testq $1, grs .set i, 0 .rep 20 cmovzq zero, m cmovzq zero, t .if (i != 0) sarq $1, grs .endif xorq m, t xorq m, d btq $63, m cmovcq grs, fuv movq minus2, m addq plus2, d leaq (grs,t), grs .if (i != 19) cmovs zero, m movq fuv, t testq plus2, grs .endif .set i, (i+1) .endr sarq $1, grs // Extract the matrix entries, but keep them in negated form. // Store them in the output buffer temporarily. movl $1048576, %eax leaq (fuv,%rax), m00 leaq (grs,%rax), m10 shlq $22, m00 shlq $22, m10 sarq $43, m00 sarq $43, m10 movq $2199024304128, %rax leaq (fuv,%rax), m01 leaq (grs,%rax), m11 sarq $42, m01 sarq $42, m11 movq m00, (mat) movq m01, 8(mat) movq m10, 16(mat) movq m11, 24(mat) // Compute updated f and g using the negated matrix entries; // this flips the signs of f and g but it doesn't matter. // // f = (m00 * f + m01 * g) / 2^20 // g = (m10 * f + m11 * g) / 2^20 // // Since we only need another 40 bits, we can do all of that // computation naively using (implicitly signed) 64-bit words. 
imulq f, m10 imulq m00, f imulq g, m01 imulq m11, g addq m01, f addq m10, g sarq $20, f sarq $20, g // Re-pack for 20 more rounds movq f, fuv andq $0xFFFFF, fuv movq $0xFFFFFE0000000000, %rax orq %rax, fuv movq g, grs andq $0xFFFFF, grs movq $0xc000000000000000, %rax orq %rax, grs // Second block of 20 divsteps in the same style movq $-2, minus2 movl $2, plus2e movq fuv, t movq minus2, m testq d, d cmovs zero, m testq $1, grs .set i, 0 .rep 20 cmovzq zero, m cmovzq zero, t .if (i != 0) sarq $1, grs .endif xorq m, t xorq m, d btq $63, m cmovcq grs, fuv movq minus2, m addq plus2, d leaq (grs,t), grs .if (i != 19) cmovs zero, m movq fuv, t testq plus2, grs .endif .set i, (i+1) .endr sarq $1, grs // Extract the next matrix entries, in negated form again movl $1048576, %eax leaq (fuv,%rax), m00 leaq (grs,%rax), m10 shlq $22, m00 shlq $22, m10 sarq $43, m00 sarq $43, m10 movq $2199024304128, %rax leaq (fuv,%rax), m01 leaq (grs,%rax), m11 sarq $42, m01 sarq $42, m11 // Compute updated f and g using the negated matrix entries, // and so again flipping (thus actually restoring) the signs. // // f = (n00 * f + n01 * g) / 2^20 // g = (n10 * f + n11 * g) / 2^20 movq g, fuv movq f, grs imulq m00, f imulq m01, fuv addq fuv, f imulq m11, g imulq m10, grs addq grs, g sarq $20, f sarq $20, g // Re-pack for 20 more rounds movq f, fuv andq $0xFFFFF, fuv movq $0xFFFFFE0000000000, %rax orq %rax, fuv movq g, grs andq $0xFFFFF, grs movq $0xc000000000000000, %rax orq %rax, grs // Multiply the first two matrices, and re-store in the output buffer. // // [m00_new m01_new] = [m00 m01] * [m00_prev m01_prev] // [m10_new m11_new] [m10 m11] [m10_prev m11_prev] // // The resulting matrix entries are: // // m00_new = m00 * m00_prev + m01 * m10_prev // m01_new = m00 * m01_prev + m01 * m11_prev // m10_new = m10 * m00_prev + m11 * m10_prev // m11_new = m10 * m01_prev + m11 * m11_prev // // At this point the sign is right since both matrices were negated. 
movq (mat), %rax imulq m00, %rax movq 16(mat), %rdx imulq m01, %rdx imulq 8(mat), m00 imulq 24(mat), m01 addq m00, m01 leaq (%rax,%rdx), m00 movq (mat), %rax imulq m10, %rax movq 16(mat), %rdx imulq m11, %rdx imulq 8(mat), m10 imulq 24(mat), m11 addq m10, m11 leaq (%rax,%rdx), m10 movq m00, (mat) movq m01, 8(mat) movq m10, 16(mat) movq m11, 24(mat) // Third block of divsteps, same style but a total of 19 not 20 movq $-2, minus2 movl $2, plus2e movq fuv, t movq minus2, m testq d, d cmovs zero, m testq $1, grs .set i, 0 .rep 19 cmovzq zero, m cmovzq zero, t .if (i != 0) sarq $1, grs .endif xorq m, t xorq m, d btq $63, m cmovcq grs, fuv movq minus2, m addq plus2, d leaq (grs,t), grs .if (i != 18) cmovs zero, m movq fuv, t testq plus2, grs .endif .set i, (i+1) .endr sarq $1, grs // Extract the matrix entries from the final 19 divsteps movl $1048576, %eax leaq (fuv,%rax), m00 leaq (grs,%rax), m10 shlq $21, m00 shlq $21, m10 sarq $43, m00 sarq $43, m10 movq $2199024304128, %rax leaq (fuv,%rax), m01 leaq (grs,%rax), m11 sarq $43, m01 sarq $43, m11 // Multiply by this new matrix // // [m00_new m01_new] = [m00 m01] * [m00_prev m01_prev] // [m10_new m11_new] [m10 m11] [m10_prev m11_prev] // // The resulting matrix entries are: // // m00_new = m00 * m00_prev + m01 * m10_prev // m01_new = m00 * m01_prev + m01 * m11_prev // m10_new = m10 * m00_prev + m11 * m10_prev // m11_new = m10 * m01_prev + m11 * m11_prev // // Since we didn't negate the n matrix, all products are negated // and so we insert negations movq (mat), %rax imulq m00, %rax movq 16(mat), %rdx imulq m01, %rdx imulq 8(mat), m00 imulq 24(mat), m01 addq m00, m01 leaq (%rax,%rdx), m00 negq m01 negq m00 movq (mat), %rax imulq m10, %rax movq 16(mat), %rdx imulq m11, %rdx imulq 8(mat), m10 imulq 24(mat), m11 addq m10, m11 leaq (%rax,%rdx), m10 negq m11 negq m10 // Now write back the final matrix and d for the whole 59 steps movq m00, (mat) movq m01, 8(mat) movq m10, 16(mat) movq m11, 24(mat) movq d, %rax // Restore registers and return CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(word_divstep59) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
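
// For reference, the basic recurrence iterated by the three blocks above is
// the signed "divstep" below, shown as a minimal C sketch on full-width
// values (divstep_ref is an illustrative name, not part of s2n-bignum;
// assumes <stdint.h>; in the intended use f and d are odd, so the shifted
// quantities are always even):
//
//   static void divstep_ref(int64_t *d, int64_t *f, int64_t *g)
//   {
//       if (*d >= 0 && (*g & 1)) {            // swap case
//           int64_t t = *f;
//           *f = *g;
//           *g = (*g - t) >> 1;
//           *d = 2 - *d;
//       } else if (*g & 1) {                  // g odd, no swap
//           *g = (*g + *f) >> 1;
//           *d = 2 + *d;
//       } else {                              // g even
//           *g >>= 1;
//           *d = 2 + *d;
//       }
//   }
//
// The packed fuv/grs encoding runs 20 such steps at a time on the low 20
// bits of f and g while accumulating the corresponding update matrix in
// the high bits.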
wlsfx/bnbb
4,376
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_sqr.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square z := x^2 // Input x[n]; output z[k] // // extern void bignum_sqr(uint64_t k, uint64_t *z, uint64_t n, const uint64_t *x); // // Does the "z := x^2" operation where x is n digits and result z is k. // Truncates the result in general unless k >= 2 * n // // Standard x86-64 ABI: RDI = k, RSI = z, RDX = n, RCX = x // Microsoft x64 ABI: RCX = k, RDX = z, R8 = n, R9 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr) .text // First three are where arguments come in, but n is moved. #define p %rdi #define z %rsi #define x %rcx #define n %r8 // These are always local scratch since multiplier result is in these #define a %rax #define d %rdx // Other variables #define i %rbx #define ll %rbp #define hh %r9 #define k %r10 #define y %r11 #define htop %r12 #define l %r13 #define h %r14 #define c %r15 // Short versions #define llshort %ebp S2N_BN_SYMBOL(bignum_sqr): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx #endif // We use too many registers, and also we need %rax:%rdx for multiplications CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) movq %rdx, n // If p = 0 the result is trivial and nothing needs doing testq p, p jz Lbignum_sqr_end // initialize (hh,ll) = 0 xorl llshort, llshort xorq hh, hh // Iterate outer loop from k = 0 ... k = p - 1 producing result digits xorq k, k Lbignum_sqr_outerloop: // First let bot = MAX 0 (k + 1 - n) and top = MIN (k + 1) n // We want to accumulate all x[i] * x[k - i] for bot <= i < top // For the optimization of squaring we avoid duplication and do // 2 * x[i] * x[k - i] for i < htop, where htop = MIN ((k+1)/2) n // Initialize i = bot; in fact just compute bot as i directly. xorq c, c leaq 1(k), i movq i, htop shrq $1, htop subq n, i cmovcq c, i cmpq n, htop cmovncq n, htop // Initialize the three-part local sum (c,h,l); c was already done above xorq l, l xorq h, h // If htop <= bot then main doubled part of the sum is empty cmpq htop, i jnc Lbignum_sqr_nosumming // Use a moving pointer for [y] = x[k-i] for the cofactor movq k, a subq i, a leaq (x,a,8), y // Do the main part of the sum x[i] * x[k - i] for 2 * i < k Lbignum_sqr_innerloop: movq (x,i,8), a mulq (y) addq a, l adcq d, h adcq $0, c subq $8, y incq i cmpq htop, i jc Lbignum_sqr_innerloop // Now double it addq l, l adcq h, h adcq c, c // If k is even (which means 2 * i = k) and i < n add the extra x[i]^2 term Lbignum_sqr_nosumming: testq $1, k jnz Lbignum_sqr_innerend cmpq n, i jnc Lbignum_sqr_innerend movq (x,i,8), a mulq a addq a, l adcq d, h adcq $0, c // Now add the local sum into the global sum, store and shift Lbignum_sqr_innerend: addq ll, l movq l, (z,k,8) adcq hh, h movq h, ll adcq $0, c movq c, hh incq k cmpq p, k jc Lbignum_sqr_outerloop // Restore registers and return Lbignum_sqr_end: CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_sqr) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
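
// For reference, a minimal C sketch of the same column scheme with doubled
// cross terms (bignum_sqr_ref is an illustrative name, not part of
// s2n-bignum; assumes <stdint.h> and unsigned __int128):
//
//   void bignum_sqr_ref(uint64_t p, uint64_t *z, uint64_t n, const uint64_t *x)
//   {
//       uint64_t ll = 0, hh = 0;                      // rolling global window
//       for (uint64_t k = 0; k < p; k++) {
//           uint64_t l = 0, h = 0, c = 0;             // local 3-word column sum
//           uint64_t bot = (k + 1 > n) ? (k + 1) - n : 0;
//           uint64_t htop = ((k + 1) / 2 < n) ? (k + 1) / 2 : n;
//           for (uint64_t i = bot; i < htop; i++) {   // each cross term once
//               unsigned __int128 hl = ((unsigned __int128)h << 64) | l;
//               unsigned __int128 s = hl + (unsigned __int128)x[i] * x[k - i];
//               c += (s < hl); l = (uint64_t)s; h = (uint64_t)(s >> 64);
//           }
//           c = 2 * c + (h >> 63);                    // double the cross terms
//           h = (h << 1) | (l >> 63);
//           l <<= 1;
//           if (!(k & 1) && k / 2 < n) {              // add the square term
//               unsigned __int128 hl = ((unsigned __int128)h << 64) | l;
//               unsigned __int128 s =
//                   hl + (unsigned __int128)x[k / 2] * x[k / 2];
//               c += (s < hl); l = (uint64_t)s; h = (uint64_t)(s >> 64);
//           }
//           unsigned __int128 hl = ((unsigned __int128)h << 64) | l;
//           unsigned __int128 s = hl + (((unsigned __int128)hh << 64) | ll);
//           c += (s < hl);                            // fold in global window
//           z[k] = (uint64_t)s;                       // emit one digit
//           ll = (uint64_t)(s >> 64);
//           hh = c;
//       }
//   }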
wlsfx/bnbb
3,702
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_cmadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply-add with single-word multiplier, z := z + c * y // Inputs c, y[n]; outputs function return (carry-out) and z[k] // // extern uint64_t bignum_cmadd(uint64_t k, uint64_t *z, uint64_t c, uint64_t n, // const uint64_t *y); // // Does the "z := z + c * y" operation where y is n digits, result z is p. // Truncates the result in general. // // The return value is a high/carry word that is meaningful when p = n + 1, or // more generally when n <= p and the result fits in p + 1 digits. In these // cases it gives the top digit of the (p + 1)-digit result. // // Standard x86-64 ABI: RDI = k, RSI = z, RDX = c, RCX = n, R8 = y, returns RAX // Microsoft x64 ABI: RCX = k, RDX = z, R8 = c, R9 = n, [RSP+40] = y, returns RAX // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmadd) .text #define p %rdi #define z %rsi #define c %r9 #define n %rcx #define x %r8 #define i %r10 #define h %r11 #define r %rbx #define hshort %r11d #define ishort %r10d S2N_BN_SYMBOL(bignum_cmadd): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx movq 56(%rsp), %r8 #endif // Seems hard to avoid one more register CFI_PUSH(%rbx) // First clamp the input size n := min(p,n) since we can never need to read // past the p'th term of the input to generate p-digit output. // Subtract p := p - min(n,p) so it holds the size of the extra tail needed cmpq n, p cmovcq p, n subq n, p // Initialize high part h = 0; if n = 0 do nothing but return that zero xorq h, h testq n, n jz Lbignum_cmadd_end // Move c into a safer register as multiplies overwrite %rdx movq %rdx, c // Initialization of the loop: 2^64 * CF + [h,z_0'] = z_0 + c * x_0 movq (x), %rax mulq c addq %rax, (z) movq %rdx, h movl $1, ishort decq n jz Lbignum_cmadd_hightail // Main loop, where we always have CF + previous high part h to add in Lbignum_cmadd_loop: adcq (z,i,8), h sbbq r, r movq (x,i,8), %rax mulq c subq r, %rdx addq h, %rax movq %rax, (z,i,8) movq %rdx, h incq i decq n jnz Lbignum_cmadd_loop Lbignum_cmadd_hightail: adcq $0, h // Propagate the carry all the way to the end with h as extra carry word Lbignum_cmadd_tail: testq p, p jz Lbignum_cmadd_end addq h, (z,i,8) movl $0, hshort incq i decq p jz Lbignum_cmadd_highend Lbignum_cmadd_tloop: adcq h, (z,i,8) incq i decq p jnz Lbignum_cmadd_tloop Lbignum_cmadd_highend: adcq $0, h // Return the high/carry word Lbignum_cmadd_end: movq h, %rax CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_cmadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
1,240
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_min.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Return minimum of two unsigned 64-bit words // Inputs a, b; output function return // // extern uint64_t word_min(uint64_t a, uint64_t b); // // Standard x86-64 ABI: RDI = a, RSI = b, returns RAX // Microsoft x64 ABI: RCX = a, RDX = b, returns RAX // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_min) S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_min) S2N_BN_SYM_PRIVACY_DIRECTIVE(word_min) .text #define a %rdi #define b %rsi S2N_BN_SYMBOL(word_min): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif movq a, %rax cmpq b, a cmovncq b, %rax #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(word_min) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
4,092
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_mul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[m], y[n]; output z[k] // // extern void bignum_mul(uint64_t k, uint64_t *z, uint64_t m, const uint64_t *x, // uint64_t n, const uint64_t *y); // // Does the "z := x * y" operation where x is m digits, y is n, result z is k. // Truncates the result in general unless k >= m + n // // Standard x86-64 ABI: RDI = k, RSI = z, RDX = m, RCX = x, R8 = n, R9 = y // Microsoft x64 ABI: RCX = k, RDX = z, R8 = m, R9 = x, [RSP+40] = n, [RSP+48] = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul) .text // These are actually right #define p %rdi #define z %rsi #define n %r8 // These are not #define c %r15 #define h %r14 #define l %r13 #define x %r12 #define y %r11 #define i %rbx #define k %r10 #define m %rbp // These are always local scratch since multiplier result is in these #define a %rax #define d %rdx S2N_BN_SYMBOL(bignum_mul): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx movq 56(%rsp), %r8 movq 64(%rsp), %r9 #endif // We use too many registers, and also we need %rax:%rdx for multiplications CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) movq %rdx, m // If the result size is zero, do nothing // Note that even if either or both inputs has size zero, we can't // just give up because we at least need to zero the output array // If we did a multiply-add variant, however, then we could testq p, p jz Lbignum_mul_end // Set initial 2-part sum to zero (we zero c inside the body) xorq h, h xorq l, l // Otherwise do outer loop k = 0 ... k = p - 1 xorq k, k Lbignum_mul_outerloop: // Zero our carry term first; we eventually want it and a zero is useful now // Set a = max 0 (k + 1 - n), i = min (k + 1) m // This defines the range a <= j < i for the inner summation // Note that since k < p < 2^64 we can assume k + 1 doesn't overflow // And since we want to increment it anyway, we might as well do it now xorq c, c // c = 0 incq k // k = k + 1 movq k, a // a = k + 1 subq n, a // a = k + 1 - n cmovcq c, a // a = max 0 (k + 1 - n) movq m, i // i = m cmpq m, k // CF <=> k + 1 < m cmovcq k, i // i = min (k + 1) m // Turn i into a loop count, and skip things if it's <= 0 // Otherwise set up initial pointers x -> x0[a] and y -> y0[k - a] // and then launch into the main inner loop, postdecrementing i movq k, d subq i, d subq a, i jbe Lbignum_mul_innerend leaq (%rcx,a,8), x leaq -8(%r9,d,8), y Lbignum_mul_innerloop: movq (y,i,8), %rax mulq (x) addq $8, x addq %rax, l adcq %rdx, h adcq $0, c decq i jnz Lbignum_mul_innerloop Lbignum_mul_innerend: movq l, (z) movq h, l movq c, h addq $8, z cmpq p, k jc Lbignum_mul_outerloop Lbignum_mul_end: CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
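
// For reference, a minimal C sketch of the same column-by-column schoolbook
// scheme (bignum_mul_ref is an illustrative name, not part of s2n-bignum;
// assumes <stdint.h> and unsigned __int128; the parameter p is the output
// size, matching the register naming above):
//
//   void bignum_mul_ref(uint64_t p, uint64_t *z, uint64_t m, const uint64_t *x,
//                       uint64_t n, const uint64_t *y)
//   {
//       uint64_t l = 0, h = 0;                        // rolling 2-word window
//       for (uint64_t k = 0; k < p; k++) {
//           uint64_t c = 0;
//           uint64_t bot = (k + 1 > n) ? (k + 1) - n : 0; // max 0 (k + 1 - n)
//           uint64_t top = (k + 1 < m) ? (k + 1) : m;     // min (k + 1) m
//           for (uint64_t j = bot; j < top; j++) {
//               unsigned __int128 hl = ((unsigned __int128)h << 64) | l;
//               unsigned __int128 s = hl + (unsigned __int128)x[j] * y[k - j];
//               c += (s < hl);                        // carry out of 128 bits
//               l = (uint64_t)s;
//               h = (uint64_t)(s >> 64);
//           }
//           z[k] = l;                                 // emit digit, shift window
//           l = h;
//           h = c;
//       }
//   }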
wlsfx/bnbb
2,486
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_modoptneg.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Optionally negate modulo m, z := (-x) mod m (if p nonzero) or z := x // (if p zero), assuming x reduced // Inputs p, x[k], m[k]; output z[k] // // extern void bignum_modoptneg(uint64_t k, uint64_t *z, uint64_t p, // const uint64_t *x, const uint64_t *m); // // Standard x86-64 ABI: RDI = k, RSI = z, RDX = p, RCX = x, R8 = m // Microsoft x64 ABI: RCX = k, RDX = z, R8 = p, R9 = x, [RSP+40] = m // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_modoptneg) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_modoptneg) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_modoptneg) .text #define k %rdi #define z %rsi #define p %rdx #define x %rcx #define m %r8 #define a %r9 #define c %rax #define b %r10 #define i %r11 S2N_BN_SYMBOL(bignum_modoptneg): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx movq 56(%rsp), %r8 #endif // Do nothing if k = 0 testq k, k jz Lbignum_modoptneg_end // Make an additional check for zero input, and force p to zero in this case. // This can be skipped if the input is known not to be zero a priori. xorq i, i xorq a, a Lbignum_modoptneg_cmploop: orq (x,i,8), a incq i cmpq k, i jc Lbignum_modoptneg_cmploop cmpq $0, a cmovzq a, p // Turn the input p into a strict bitmask negq p sbbq p, p // Main loop xorq i, i movq p, c Lbignum_modoptneg_mainloop: movq (m,i,8), a andq p, a movq (x,i,8), b xorq p, b negq c adcq b, a sbbq c, c movq a, (z,i,8) incq i cmpq k, i jc Lbignum_modoptneg_mainloop Lbignum_modoptneg_end: #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_modoptneg) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
4,504
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/bignum_negmodinv.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Negated modular inverse, z := (-1/x) mod 2^{64k}
// Input x[k]; output z[k]
//
// extern void bignum_negmodinv(uint64_t k, uint64_t *z, const uint64_t *x);
//
// Assuming x is odd (otherwise nothing makes sense) the result satisfies
//
//       x * z + 1 == 0 (mod 2^{64 * k})
//
// but is not necessarily reduced mod x.
//
// Standard x86-64 ABI: RDI = k, RSI = z, RDX = x
// Microsoft x64 ABI: RCX = k, RDX = z, R8 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_negmodinv)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_negmodinv)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_negmodinv)
        .text

#define k %rdi
#define z %rsi

// Moved from initial location to free %rdx

#define x %rcx

#define a %rax
#define d %rdx
#define i %r8
#define m %r9
#define h %r10
#define w %r11
#define t %r12
#define e %rbx

#define ashort %eax
#define ishort %r8d

S2N_BN_SYMBOL(bignum_negmodinv):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

        CFI_PUSH(%rbx)
        CFI_PUSH(%r12)

// If k = 0 do nothing (actually we could have avoided the pushes and pops)

        testq   k, k
        jz      Lbignum_negmodinv_end

// Move the x pointer into its permanent home (%rdx is needed for muls)

        movq    %rdx, x

// Compute word-level negated modular inverse w for x[0].

        movq    (x), a
        movq    a, d
        movq    a, w
        shlq    $2, d
        subq    d, w
        xorq    $2, w
        movq    w, d
        imulq   a, d
        movl    $2, ashort
        addq    d, a
        addq    $1, d
        imulq   a, w
        imulq   d, d
        movl    $1, ashort
        addq    d, a
        imulq   a, w
        imulq   d, d
        movl    $1, ashort
        addq    d, a
        imulq   a, w
        imulq   d, d
        movl    $1, ashort
        addq    d, a
        imulq   a, w

// Write that as lowest word of the output, then if k = 1 we're finished

        movq    w, (z)
        cmpq    $1, k
        jz      Lbignum_negmodinv_end

// Otherwise compute and write the other digits (1..k-1) of w * x + 1

        movq    (x), a
        xorq    h, h
        mulq    w
        addq    $1, a
        adcq    d, h
        movl    $1, ishort
Lbignum_negmodinv_initloop:
        movq    (x,i,8), a
        mulq    w
        addq    h, a
        adcq    $0, d
        movq    a, (z,i,8)
        movq    d, h
        incq    i
        cmpq    k, i
        jc      Lbignum_negmodinv_initloop

// For simpler indexing, z := z + 8 and k := k - 1 per outer iteration
// Then we can use the same index for x and for z and effective size k.
//
// But we also offset k by 1 so the "real" size is k + 1; after doing
// the special zeroth digit we count with t through k more digits, so
// getting k + 1 total as required.
//
// This lets us avoid some special cases inside the loop at the cost
// of needing the additional "finale" tail for the final iteration
// since we do one outer loop iteration too few.

        subq    $2, k
        jz      Lbignum_negmodinv_finale

Lbignum_negmodinv_outerloop:
        addq    $8, z
        movq    (z), h
        movq    w, m
        imulq   h, m
        movq    m, (z)
        movq    (x), a
        mulq    m
        addq    h, a
        adcq    $0, d
        movq    d, h
        movl    $1, ishort
        movq    k, t
Lbignum_negmodinv_innerloop:
        adcq    (z,i,8), h
        sbbq    e, e
        movq    (x,i,8), a
        mulq    m
        subq    e, d
        addq    h, a
        movq    a, (z,i,8)
        movq    d, h
        incq    i
        decq    t
        jnz     Lbignum_negmodinv_innerloop

        decq    k
        jnz     Lbignum_negmodinv_outerloop

Lbignum_negmodinv_finale:
        movq    8(z), a
        imulq   w, a
        movq    a, 8(z)

Lbignum_negmodinv_end:
        CFI_POP(%r12)
        CFI_POP(%rbx)
#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_negmodinv)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
2,355
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/generic/word_negmodinv.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Single-word negated modular inverse (-1/a) mod 2^64
// Input a; output function return
//
// extern uint64_t word_negmodinv(uint64_t a);
//
// A 64-bit function that returns a negated multiplicative inverse mod 2^64
// of its input, assuming that input is odd. Given odd input a, the result z
// will satisfy a * z + 1 == 0 (mod 2^64), i.e. a 64-bit word multiplication
// a * z will give -1.
//
// Standard x86-64 ABI: RDI = a, returns RAX
// Microsoft x64 ABI: RCX = a, returns RAX
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(word_negmodinv)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(word_negmodinv)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(word_negmodinv)
        .text

S2N_BN_SYMBOL(word_negmodinv):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
#endif

// Initial magical 5-bit approximation x = (a - a<<2) xor 2

        movq    %rdi, %rcx
        movq    %rdi, %rax
        shlq    $2, %rcx
        subq    %rcx, %rax
        xorq    $2, %rax

// Now refine to 64-bit congruence

        movq    %rax, %rcx      // %rcx = x
        imulq   %rdi, %rcx      // %rcx = a * x
        movl    $2, %edx
        addq    %rcx, %rdx      // %rdx = 1 + e = 2 + a * x
        addq    $1, %rcx        // %rcx = e = a * x + 1
        imulq   %rdx, %rax      // %rax = x * (1 + e)
        imulq   %rcx, %rcx      // %rcx = e^2
        movl    $1, %edx
        addq    %rcx, %rdx
        imulq   %rdx, %rax      // %rax = x * (1 + e) * (1 + e^2)
        imulq   %rcx, %rcx      // %rcx = e^4
        movl    $1, %edx
        addq    %rcx, %rdx
        imulq   %rdx, %rax      // %rax = x * (1 + e) * (1 + e^2) * (1 + e^4)
        imulq   %rcx, %rcx      // %rcx = e^8
        movl    $1, %edx
        addq    %rcx, %rdx
        imulq   %rdx, %rax      // %rax = x * (1 + e) * (1 + e^2) * (1 + e^4) * (1 + e^8)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(word_negmodinv)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
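
// For reference, the same refinement as a minimal C sketch
// (word_negmodinv_ref is an illustrative name, not part of s2n-bignum;
// assumes <stdint.h>, with all arithmetic implicitly mod 2^64):
//
//   uint64_t word_negmodinv_ref(uint64_t a)      // a assumed odd
//   {
//       uint64_t x = (a - (a << 2)) ^ 2;         // a * x == -1 (mod 2^5)
//       uint64_t e = a * x + 1;                  // error, divisible by 2^5
//       x *= 1 + e; e *= e;                      // error now divisible by 2^10
//       x *= 1 + e; e *= e;                      // ... by 2^20
//       x *= 1 + e; e *= e;                      // ... by 2^40
//       x *= 1 + e;                              // ... by 2^80, i.e. exactly 0
//       return x;
//   }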
wlsfx/bnbb
3,654
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_mul_4_8_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[4], y[4]; output z[8] // // extern void bignum_mul_4_8_alt(uint64_t z[static 8], const uint64_t x[static 4], // const uint64_t y[static 4]); // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_4_8_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_4_8_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_4_8_alt) .text // These are actually right #define z %rdi #define x %rsi // This is moved from %rdx to free it for muls #define y %rcx // Other variables used as a rotating 3-word window to add terms to #define t0 %r8 #define t1 %r9 #define t2 %r10 // Macro for the key "multiply and add to (c,h,l)" step #define combadd(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq $0, c // A minutely shorter form for when c = 0 initially #define combadz(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq c, c // A short form where we don't expect a top carry #define combads(h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h S2N_BN_SYMBOL(bignum_mul_4_8_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Copy y into a safe register to start with movq %rdx, y // Result term 0 movq (x), %rax mulq (y) movq %rax, (z) movq %rdx, t0 xorq t1, t1 // Result term 1 xorq t2, t2 combads(t1,t0,(x),8(y)) combadz(t2,t1,t0,8(x),(y)) movq t0, 8(z) // Result term 2 xorq t0, t0 combadz(t0,t2,t1,(x),16(y)) combadd(t0,t2,t1,8(x),8(y)) combadd(t0,t2,t1,16(x),(y)) movq t1, 16(z) // Result term 3 xorq t1, t1 combadz(t1,t0,t2,(x),24(y)) combadd(t1,t0,t2,8(x),16(y)) combadd(t1,t0,t2,16(x),8(y)) combadd(t1,t0,t2,24(x),(y)) movq t2, 24(z) // Result term 4 xorq t2, t2 combadz(t2,t1,t0,8(x),24(y)) combadd(t2,t1,t0,16(x),16(y)) combadd(t2,t1,t0,24(x),8(y)) movq t0, 32(z) // Result term 5 xorq t0, t0 combadz(t0,t2,t1,16(x),24(y)) combadd(t0,t2,t1,24(x),16(y)) movq t1, 40(z) // Result term 6 xorq t1, t1 combads(t0,t2,24(x),24(y)) movq t2, 48(z) // Result term 7 movq t0, 56(z) // Return #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_4_8_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
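
// For reference, the effect of combadd on the 3-word accumulator (c,h,l),
// as a minimal C sketch (combadd_ref is an illustrative name, not part of
// s2n-bignum; assumes <stdint.h> and unsigned __int128):
//
//   static inline void combadd_ref(uint64_t *c, uint64_t *h, uint64_t *l,
//                                  uint64_t a, uint64_t b)
//   {
//       unsigned __int128 hl = ((unsigned __int128)*h << 64) | *l;
//       unsigned __int128 s = hl + (unsigned __int128)a * b;
//       *c += (s < hl);                          // carry into the top word
//       *l = (uint64_t)s;
//       *h = (uint64_t)(s >> 64);
//   }
//
// combadz is the same step starting from c = 0 (so the carry is written
// rather than added), and combads simply drops the top-word carry.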
wlsfx/bnbb
4,700
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_mul_6_12_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[6], y[6]; output z[12] // // extern void bignum_mul_6_12_alt(uint64_t z[static 12], // const uint64_t x[static 6], // const uint64_t y[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_6_12_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_6_12_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_6_12_alt) .text // These are actually right #define z %rdi #define x %rsi // This is moved from %rdx to free it for muls #define y %rcx // Other variables used as a rotating 3-word window to add terms to #define t0 %r8 #define t1 %r9 #define t2 %r10 // Macro for the key "multiply and add to (c,h,l)" step #define combadd(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq $0, c // A minutely shorter form for when c = 0 initially #define combadz(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq c, c // A short form where we don't expect a top carry #define combads(h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h S2N_BN_SYMBOL(bignum_mul_6_12_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Copy y into a safe register to start with movq %rdx, y // Result term 0 movq (x), %rax mulq (y) movq %rax, (z) movq %rdx, t0 xorq t1, t1 // Result term 1 xorq t2, t2 combads(t1,t0,(x),8(y)) combadz(t2,t1,t0,8(x),(y)) movq t0, 8(z) // Result term 2 xorq t0, t0 combadz(t0,t2,t1,(x),16(y)) combadd(t0,t2,t1,8(x),8(y)) combadd(t0,t2,t1,16(x),(y)) movq t1, 16(z) // Result term 3 xorq t1, t1 combadz(t1,t0,t2,(x),24(y)) combadd(t1,t0,t2,8(x),16(y)) combadd(t1,t0,t2,16(x),8(y)) combadd(t1,t0,t2,24(x),(y)) movq t2, 24(z) // Result term 4 xorq t2, t2 combadz(t2,t1,t0,(x),32(y)) combadd(t2,t1,t0,8(x),24(y)) combadd(t2,t1,t0,16(x),16(y)) combadd(t2,t1,t0,24(x),8(y)) combadd(t2,t1,t0,32(x),(y)) movq t0, 32(z) // Result term 5 xorq t0, t0 combadz(t0,t2,t1,(x),40(y)) combadd(t0,t2,t1,8(x),32(y)) combadd(t0,t2,t1,16(x),24(y)) combadd(t0,t2,t1,24(x),16(y)) combadd(t0,t2,t1,32(x),8(y)) combadd(t0,t2,t1,40(x),(y)) movq t1, 40(z) // Result term 6 xorq t1, t1 combadz(t1,t0,t2,8(x),40(y)) combadd(t1,t0,t2,16(x),32(y)) combadd(t1,t0,t2,24(x),24(y)) combadd(t1,t0,t2,32(x),16(y)) combadd(t1,t0,t2,40(x),8(y)) movq t2, 48(z) // Result term 7 xorq t2, t2 combadz(t2,t1,t0,16(x),40(y)) combadd(t2,t1,t0,24(x),32(y)) combadd(t2,t1,t0,32(x),24(y)) combadd(t2,t1,t0,40(x),16(y)) movq t0, 56(z) // Result term 8 xorq t0, t0 combadz(t0,t2,t1,24(x),40(y)) combadd(t0,t2,t1,32(x),32(y)) combadd(t0,t2,t1,40(x),24(y)) movq t1, 64(z) // Result term 9 xorq t1, t1 combadz(t1,t0,t2,32(x),40(y)) combadd(t1,t0,t2,40(x),32(y)) movq t2, 72(z) // Result term 10 combads(t1,t0,40(x),40(y)) movq t0, 80(z) // Result term 11 movq t1, 88(z) // Return #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_6_12_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
33,938
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_kmul_32_64.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[32], y[32]; output z[64]; temporary buffer t[>=96] // // extern void bignum_kmul_32_64(uint64_t z[static 64], // const uint64_t x[static 32], // const uint64_t y[static 32], // uint64_t t[static 96]); // // This is a Karatsuba-style function multiplying half-sized results // internally and using temporary buffer t for intermediate results. The size // of 96 is an overstatement for compatibility with the ARM version; it // actually only uses 65 elements of t (64 + 1 for a stashed sign). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y, RCX = t // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y, R9 = t // ----------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_kmul_32_64) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_kmul_32_64) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_kmul_32_64) .text #define K 16 #define z %rdi #define x %rsi #define y %rcx #define s %r9 // We re-use the y variable to point at t later on, when this seems clearer #define t %rcx S2N_BN_SYMBOL(bignum_kmul_32_64): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx #endif // Save callee-saved registers and also push t onto the stack; we'll // use this space to back up both t and later z. Then move the y variable // into its longer-term home for the first few stages. CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) CFI_PUSH(%rcx) movq %rdx, y // Multiply the low halves CFI_CALL(Lbignum_kmul_32_64_local_bignum_kmul_16_32) // Multiply the high halves leaq 16*K-0x40(%rdi), %rdi leaq 8*K-0x40(%rsi), %rsi leaq 8*K(%rcx), %rcx CFI_CALL(Lbignum_kmul_32_64_local_bignum_kmul_16_32) // Establish %r8 as the t pointer and use the cell to back up z now movq (%rsp), %r8 subq $16*K+0x40, %rdi movq %rdi, (%rsp) // Form |x_lo - x_hi| starting at t movq -8*K-0x40(%rsi), %rax subq -8*K-0x40+8*K(%rsi), %rax movq %rax, (%r8) .set I, 1 .rep K-1 movq -8*K-0x40+8*I(%rsi), %rax sbbq -8*K-0x40+8*K+8*I(%rsi), %rax movq %rax, 8*I(%r8) .set I, (I+1) .endr movl $0, %ebx sbbq s, s // Maintain CF, set ZF for cmovs, record sign .set I, 0 .rep K movq 8*I(%r8), %rdx movq %rdx, %rax notq %rdx cmovzq %rax, %rdx adcxq %rbx, %rdx movq %rdx, 8*I(%r8) .set I, (I+1) .endr // Form |y_hi - y_lo| (note opposite order) starting at t[K] movq -8*K+8*K(%rcx), %rax subq -8*K(%rcx), %rax movq %rax, 8*K(%r8) .set I, 1 .rep K-1 movq -8*K+8*K+8*I(%rcx), %rax sbbq -8*K+8*I(%rcx), %rax movq %rax, 8*K+8*I(%r8) .set I, (I+1) .endr movl $0, %ebx sbbq %rbp, %rbp // Maintain CF, set ZF for cmovs .set I, 0 .rep K movq 8*K+8*I(%r8), %rdx movq %rdx, %rax notq %rdx cmovzq %rax, %rdx adcxq %rbx, %rdx movq %rdx, 8*K+8*I(%r8) .set I, (I+1) .endr // Stash the final sign with which to add things at t[4*K] xorq %rbp, s movq s, 32*K(%r8) // Multiply the absolute differences, putting the result at t[2*K] // This has the side-effect of putting t in the "right" register %rcx // so after the load of z, we have both z and t pointers straight. movq %r8, %rcx leaq 8*K(%r8), %rsi leaq 16*K(%r8), %rdi CFI_CALL(Lbignum_kmul_32_64_local_bignum_kmul_16_32) movq (%rsp), z // Compose the middle parts [2,1] + [1,0] + [3,2], saving carry in %rbx. 
// Put the sum at t, overwriting the absolute differences we no longer need. xorl %ebx, %ebx .set I, 0 .rep 2*K movq 8*K+8*I(z), %rax adcxq 8*I(z), %rax adoxq 16*K+8*I(z), %rax movq %rax, 8*I(t) .set I, (I+1) .endr adoxq %rbx, %rbx adcq $0, %rbx // Sign-aware addition or subtraction of the complicated term. // We double-negate it to set CF/ZF while not spoiling its // actual form: note that we eventually adcx to it below. movq 32*K(t), s negq s negq s .set I, 0 .rep 2*K movq 16*K+8*I(t), %rdx movq %rdx, %rax notq %rdx cmovzq %rax, %rdx adcxq 8*I(t), %rdx movq %rdx, 8*K+8*I(z) .set I, (I+1) .endr // Bump the accumulated carry. This must end up >= 0 because it's the top // word of a value of the form ... + h * h' + l * l' - (h - l) * (h' - l') >= 0 adcxq s, %rbx // Finally propagate the carry to the top part xorl %eax, %eax addq %rbx, 24*K(z) .set I, 1 .rep K-1 adcq %rax, 24*K+8*I(z) .set I, (I+1) .endr // Restore and return. The first pop is not needed for the ABI but // we need to adjust the stack anyway so it seems reasonable. CFI_POP(%rcx) CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET // Local copy of half-length subroutine. This has a slightly different // interface, expecting y argument in %rcx directly, and not doing any // save-restore of the other registers. It naturally moves z and x on by // 0x40, which we compensate for when it is called by adjusting offsets. S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lbignum_kmul_32_64_local_bignum_kmul_16_32) Lbignum_kmul_32_64_local_bignum_kmul_16_32: CFI_START movq (%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %r9 movq %rax, (%rdi) mulxq 0x8(%rsi), %rax, %r10 adcxq %rax, %r9 mulxq 0x10(%rsi), %rax, %r11 adcxq %rax, %r10 mulxq 0x18(%rsi), %rax, %r12 adcxq %rax, %r11 mulxq 0x20(%rsi), %rax, %r13 adcxq %rax, %r12 mulxq 0x28(%rsi), %rax, %r14 adcxq %rax, %r13 mulxq 0x30(%rsi), %rax, %r15 adcxq %rax, %r14 mulxq 0x38(%rsi), %rax, %r8 adcxq %rax, %r15 adcq %rbp, %r8 movq 0x8(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq %r9, 0x8(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x38(%rsi), %rax, %r9 adcxq %rax, %r8 adoxq %rbp, %r9 adcq %rbp, %r9 movq 0x10(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq %r10, 0x10(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x38(%rsi), %rax, %r10 adcxq %rax, %r9 adoxq %rbp, %r10 adcq %rbp, %r10 movq 0x18(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq %r11, 0x18(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x28(%rsi), 
%rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x38(%rsi), %rax, %r11 adcxq %rax, %r10 adoxq %rbp, %r11 adcq %rbp, %r11 movq 0x20(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq %r12, 0x20(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x38(%rsi), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcq %rbp, %r12 movq 0x28(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq %r13, 0x28(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x38(%rsi), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 adcq %rbp, %r13 movq 0x30(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 movq %r14, 0x30(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x38(%rsi), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 movq 0x38(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 movq %r15, 0x38(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x38(%rsi), %rax, %r15 adcxq %rax, %r14 adoxq %rbp, %r15 adcq %rbp, %r15 movq 0x40(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 movq %r8, 0x40(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x38(%rsi), %rax, %r8 adcxq %rax, %r15 adoxq %rbp, %r8 adcq %rbp, %r8 movq 0x48(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq %r9, 0x48(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x38(%rsi), %rax, 
%r9 adcxq %rax, %r8 adoxq %rbp, %r9 adcq %rbp, %r9 movq 0x50(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq %r10, 0x50(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x38(%rsi), %rax, %r10 adcxq %rax, %r9 adoxq %rbp, %r10 adcq %rbp, %r10 movq 0x58(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq %r11, 0x58(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x38(%rsi), %rax, %r11 adcxq %rax, %r10 adoxq %rbp, %r11 adcq %rbp, %r11 movq 0x60(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq %r12, 0x60(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x38(%rsi), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcq %rbp, %r12 movq 0x68(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq %r13, 0x68(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x38(%rsi), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 adcq %rbp, %r13 movq 0x70(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 movq %r14, 0x70(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x38(%rsi), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 movq 0x78(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 movq %r15, 0x78(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x38(%rsi), %rax, %r15 adcxq %rax, %r14 adoxq %rbp, %r15 adcq %rbp, %r15 movq %r8, 0x80(%rdi) movq %r9, 0x88(%rdi) movq %r10, 0x90(%rdi) movq %r11, 
0x98(%rdi) movq %r12, 0xa0(%rdi) movq %r13, 0xa8(%rdi) movq %r14, 0xb0(%rdi) movq %r15, 0xb8(%rdi) addq $0x40, %rdi addq $0x40, %rsi movq (%rcx), %rdx xorl %ebp, %ebp movq (%rdi), %r8 movq 0x8(%rdi), %r9 mulxq (%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 movq %r8, (%rdi) movq 0x10(%rdi), %r10 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq 0x18(%rdi), %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq 0x20(%rdi), %r12 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x28(%rdi), %r13 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq 0x30(%rdi), %r14 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq 0x38(%rdi), %r15 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x38(%rsi), %rax, %r8 adcxq %rax, %r15 adoxq %rbp, %r8 adcxq %rbp, %r8 movq 0x8(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq %r9, 0x8(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x38(%rsi), %rax, %r9 adcxq %rax, %r8 adoxq %rbp, %r9 adcq %rbp, %r9 movq 0x10(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq %r10, 0x10(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x38(%rsi), %rax, %r10 adcxq %rax, %r9 adoxq %rbp, %r10 adcq %rbp, %r10 movq 0x18(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq %r11, 0x18(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x38(%rsi), %rax, %r11 adcxq %rax, %r10 adoxq %rbp, %r11 adcq %rbp, %r11 movq 0x20(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq %r12, 0x20(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x38(%rsi), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcq %rbp, %r12 movq 0x28(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq %r13, 0x28(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r10 
adoxq %rbx, %r11 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x38(%rsi), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 adcq %rbp, %r13 movq 0x30(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 movq %r14, 0x30(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x38(%rsi), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 movq 0x38(%rcx), %rdx xorl %ebp, %ebp mulxq (%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 movq %r15, 0x38(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x38(%rsi), %rax, %r15 adcxq %rax, %r14 adoxq %rbp, %r15 adcq %rbp, %r15 movq 0x40(%rcx), %rdx xorl %ebp, %ebp adoxq 0x40(%rdi), %r8 mulxq (%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 movq %r8, 0x40(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x38(%rsi), %rax, %r8 adcxq %rax, %r15 adoxq %rbp, %r8 adcq %rbp, %r8 movq 0x48(%rcx), %rdx xorl %ebp, %ebp adoxq 0x48(%rdi), %r9 mulxq (%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq %r9, 0x48(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x38(%rsi), %rax, %r9 adcxq %rax, %r8 adoxq %rbp, %r9 adcq %rbp, %r9 movq 0x50(%rcx), %rdx xorl %ebp, %ebp adoxq 0x50(%rdi), %r10 mulxq (%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq %r10, 0x50(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x38(%rsi), %rax, %r10 adcxq %rax, %r9 adoxq %rbp, %r10 adcq %rbp, %r10 movq 0x58(%rcx), %rdx xorl %ebp, %ebp adoxq 0x58(%rdi), %r11 mulxq (%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq %r11, 0x58(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x30(%rsi), %rax, 
%rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x38(%rsi), %rax, %r11 adcxq %rax, %r10 adoxq %rbp, %r11 adcq %rbp, %r11 movq 0x60(%rcx), %rdx xorl %ebp, %ebp adoxq 0x60(%rdi), %r12 mulxq (%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq %r12, 0x60(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x38(%rsi), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcq %rbp, %r12 movq 0x68(%rcx), %rdx xorl %ebp, %ebp adoxq 0x68(%rdi), %r13 mulxq (%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq %r13, 0x68(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x38(%rsi), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 adcq %rbp, %r13 movq 0x70(%rcx), %rdx xorl %ebp, %ebp adoxq 0x70(%rdi), %r14 mulxq (%rsi), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 movq %r14, 0x70(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x38(%rsi), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 movq 0x78(%rcx), %rdx xorl %ebp, %ebp adoxq 0x78(%rdi), %r15 mulxq (%rsi), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 movq %r15, 0x78(%rdi) mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x30(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x38(%rsi), %rax, %r15 adcxq %rax, %r14 adoxq %rbp, %r15 adcq %rbp, %r15 movq %r8, 0x80(%rdi) movq %r9, 0x88(%rdi) movq %r10, 0x90(%rdi) movq %r11, 0x98(%rdi) movq %r12, 0xa0(%rdi) movq %r13, 0xa8(%rdi) movq %r14, 0xb0(%rdi) movq %r15, 0xb8(%rdi) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lbignum_kmul_32_64_local_bignum_kmul_16_32) S2N_BN_SIZE_DIRECTIVE(bignum_kmul_32_64) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
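The unrolled rows above all follow one micro-pattern: mulxq splits a 64x64 product, adcxq adds the low half into the window on the CF carry chain, and adoxq adds the high half on the independent OF chain. A minimal C model of one such step, assuming the illustrative name mulpadd_model (not part of the source):

#include <stdint.h>

/* Schematic model of one mulx/adcx/adox step: the 128-bit product a*b is
   split, the low half is added into window word w0 on the CF carry chain
   and the high half into w1 on the separate OF chain, so the two chains
   run through a whole row without disturbing each other. */
static inline void mulpadd_model(uint64_t *w0, uint64_t *w1,
                                 uint64_t *cf, uint64_t *of,
                                 uint64_t a, uint64_t b) {
    unsigned __int128 p = (unsigned __int128)a * b;
    unsigned __int128 t = (unsigned __int128)*w0 + (uint64_t)p + *cf;
    *w0 = (uint64_t)t; *cf = (uint64_t)(t >> 64);        /* adcxq */
    t = (unsigned __int128)*w1 + (uint64_t)(p >> 64) + *of;
    *w1 = (uint64_t)t; *of = (uint64_t)(t >> 64);        /* adoxq */
}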
wlsfx/bnbb
3,784
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_sqr_4_8.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Square, z := x^2
// Input x[4]; output z[8]
//
//    extern void bignum_sqr_4_8(uint64_t z[static 8], const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_4_8)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_4_8)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_4_8)
        .text

// These are actually right

#define z %rdi
#define x %rsi

// A zero register

#define zero %rbp
#define zeroe %ebp

// Other registers

#define d1 %r8
#define d2 %r9
#define d3 %r10
#define d4 %r11
#define d5 %r12
#define d6 %r13

S2N_BN_SYMBOL(bignum_sqr_4_8):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Save more registers to play with

        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)

// Set up an initial window [d6;...d1] = [23;03;01]

        movq    (x), %rdx
        mulxq   8(x), d1, d2
        mulxq   24(x), d3, d4
        movq    16(x), %rdx
        mulxq   24(x), d5, d6

// Clear our zero register, and also initialize the flags for the carry chain

        xorl    zeroe, zeroe

// Chain in the addition of 02 + 12 + 13 to that window (no carry-out possible)
// This gives all the "heterogeneous" terms of the squaring ready to double

        mulxq   (x), %rax, %rcx
        adcxq   %rax, d2
        adoxq   %rcx, d3
        mulxq   8(x), %rax, %rcx
        adcxq   %rax, d3
        adoxq   %rcx, d4
        movq    24(x), %rdx
        mulxq   8(x), %rax, %rcx
        adcxq   %rax, d4
        adoxq   %rcx, d5
        adcxq   zero, d5
        adoxq   zero, d6
        adcxq   zero, d6

// In principle this is otiose as CF and OF carries are absorbed at this point
// However it seems helpful for the OOO engine to be told it's a fresh start

        xorl    zeroe, zeroe

// Double and add to the 00 + 11 + 22 + 33 terms
//
// We could use shift-double but this seems tidier and in larger squarings
// it was actually more efficient. I haven't experimented with this small
// case to see how much that matters. Note: the writeback here is sprinkled
// into the sequence in such a way that things still work if z = x, i.e. if
// the output overwrites the input buffer and beyond.

        movq    (x), %rdx
        mulxq   %rdx, %rax, %rdx
        movq    %rax, (z)
        adcxq   d1, d1
        adoxq   %rdx, d1
        movq    8(x), %rdx
        movq    d1, 8(z)
        mulxq   %rdx, %rax, %rdx
        adcxq   d2, d2
        adoxq   %rax, d2
        adcxq   d3, d3
        adoxq   %rdx, d3
        movq    16(x), %rdx
        movq    d2, 16(z)
        mulxq   %rdx, %rax, %rdx
        adcxq   d4, d4
        adoxq   %rax, d4
        adcxq   d5, d5
        adoxq   %rdx, d5
        movq    24(x), %rdx
        movq    d3, 24(z)
        mulxq   %rdx, %rax, %rdx
        movq    d4, 32(z)
        adcxq   d6, d6
        movq    d5, 40(z)
        adoxq   %rax, d6
        movq    d6, 48(z)
        adcxq   zero, %rdx
        adoxq   zero, %rdx
        movq    %rdx, 56(z)

// Restore saved registers and return

        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbp)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sqr_4_8)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
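The doubling strategy the comments describe generalizes to any width: accumulate each cross product x[i]*x[j] with i < j once, double it, then fold in the diagonal squares x[i]^2. A word-level sketch in C, illustrative only and not the implementation above:

#include <stdint.h>

/* Word-level sketch of the squaring strategy: cross terms once, doubled,
   plus diagonal squares, with a final carry-propagation pass. */
static void sqr_4_8_model(uint64_t z[8], const uint64_t x[4]) {
    unsigned __int128 acc[8] = {0};
    for (int i = 0; i < 4; i++) {
        for (int j = i + 1; j < 4; j++) {
            /* 2*x[i]*x[j]: the product is < 2^127, so the doubling fits */
            unsigned __int128 dp = ((unsigned __int128)x[i] * x[j]) << 1;
            acc[i + j]     += (uint64_t)dp;
            acc[i + j + 1] += (uint64_t)(dp >> 64);
        }
        unsigned __int128 s = (unsigned __int128)x[i] * x[i];
        acc[2 * i]     += (uint64_t)s;        /* diagonal square terms */
        acc[2 * i + 1] += (uint64_t)(s >> 64);
    }
    unsigned __int128 c = 0;
    for (int k = 0; k < 8; k++) {             /* propagate the carries */
        c += acc[k];
        z[k] = (uint64_t)c;
        c >>= 64;
    }
}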
wlsfx/bnbb
7,484
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_sqr_8_16.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Square, z := x^2
// Input x[8]; output z[16]
//
//    extern void bignum_sqr_8_16(uint64_t z[static 16], const uint64_t x[static 8]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_8_16)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_8_16)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_8_16)
        .text

// These are actually right

#define z %rdi
#define x %rsi

// A zero register

#define zero %rbp
#define zeroe %ebp

// mulpadd i, j adds rdx * x[i] into the window at the i+j point

.macro mulpadd arg1,arg2
        mulxq   8*\arg1(x), %rax, %rcx
.if ((\arg1 + \arg2) % 8 == 0)
        adcxq   %rax, %r8
        adoxq   %rcx, %r9
.elseif ((\arg1 + \arg2) % 8 == 1)
        adcxq   %rax, %r9
        adoxq   %rcx, %r10
.elseif ((\arg1 + \arg2) % 8 == 2)
        adcxq   %rax, %r10
        adoxq   %rcx, %r11
.elseif ((\arg1 + \arg2) % 8 == 3)
        adcxq   %rax, %r11
        adoxq   %rcx, %r12
.elseif ((\arg1 + \arg2) % 8 == 4)
        adcxq   %rax, %r12
        adoxq   %rcx, %r13
.elseif ((\arg1 + \arg2) % 8 == 5)
        adcxq   %rax, %r13
        adoxq   %rcx, %r14
.elseif ((\arg1 + \arg2) % 8 == 6)
        adcxq   %rax, %r14
        adoxq   %rcx, %r15
.elseif ((\arg1 + \arg2) % 8 == 7)
        adcxq   %rax, %r15
        adoxq   %rcx, %r8
.endif
.endm

// mulpade i, j adds rdx * x[i] into the window at i+j
// but re-creates the top word assuming nothing to add there

.macro mulpade arg1,arg2
.if ((\arg1 + \arg2) % 8 == 0)
        mulxq   8*\arg1(x), %rax, %r9
        adcxq   %rax, %r8
        adoxq   zero, %r9
.elseif ((\arg1 + \arg2) % 8 == 1)
        mulxq   8*\arg1(x), %rax, %r10
        adcxq   %rax, %r9
        adoxq   zero, %r10
.elseif ((\arg1 + \arg2) % 8 == 2)
        mulxq   8*\arg1(x), %rax, %r11
        adcxq   %rax, %r10
        adoxq   zero, %r11
.elseif ((\arg1 + \arg2) % 8 == 3)
        mulxq   8*\arg1(x), %rax, %r12
        adcxq   %rax, %r11
        adoxq   zero, %r12
.elseif ((\arg1 + \arg2) % 8 == 4)
        mulxq   8*\arg1(x), %rax, %r13
        adcxq   %rax, %r12
        adoxq   zero, %r13
.elseif ((\arg1 + \arg2) % 8 == 5)
        mulxq   8*\arg1(x), %rax, %r14
        adcxq   %rax, %r13
        adoxq   zero, %r14
.elseif ((\arg1 + \arg2) % 8 == 6)
        mulxq   8*\arg1(x), %rax, %r15
        adcxq   %rax, %r14
        adoxq   zero, %r15
.elseif ((\arg1 + \arg2) % 8 == 7)
        mulxq   8*\arg1(x), %rax, %r8
        adcxq   %rax, %r15
        adoxq   zero, %r8
.endif
.endm

.macro diagonals

        xorl    zeroe, zeroe

// Set initial window [%r8..%r10] + 2 wb = 10 + 20 + 30 + 40 + 50 + 60 + 70

        movq    (x), %rdx
        mulxq   8(x), %r9, %rax
        movq    %r9, 8(z)
        mulxq   16(x), %r10, %rcx
        adcxq   %rax, %r10
        movq    %r10, 16(z)
        mulxq   24(x), %r11, %rax
        adcxq   %rcx, %r11
        mulxq   32(x), %r12, %rcx
        adcxq   %rax, %r12
        mulxq   40(x), %r13, %rax
        adcxq   %rcx, %r13
        mulxq   48(x), %r14, %rcx
        adcxq   %rax, %r14
        mulxq   56(x), %r15, %r8
        adcxq   %rcx, %r15
        adcxq   zero, %r8

// Add in the next diagonal = 21 + 31 + 41 + 51 + 61 + 71 + 54

        xorl    zeroe, zeroe
        movq    8(x), %rdx
        mulpadd 2, 1
        movq    %r11, 24(z)
        mulpadd 3, 1
        movq    %r12, 32(z)
        mulpadd 4, 1
        mulpadd 5, 1
        mulpadd 6, 1
        mulpade 7, 1
        movq    32(x), %rdx
        mulpade 5, 4
        adcxq   zero, %r10

// And the next one = 32 + 42 + 52 + 62 + 72 + 64 + 65

        xorl    zeroe, zeroe
        movq    16(x), %rdx
        mulpadd 3, 2
        movq    %r13, 40(z)
        mulpadd 4, 2
        movq    %r14, 48(z)
        mulpadd 5, 2
        mulpadd 6, 2
        mulpadd 7, 2
        movq    48(x), %rdx
        mulpade 4, 6
        mulpade 5, 6
        adcxq   zero, %r12

// And the final one = 43 + 53 + 63 + 73 + 74 + 75 + 76

        xorl    zeroe, zeroe
        movq    24(x), %rdx
        mulpadd 4, 3
        movq    %r15, 56(z)
        mulpadd 5, 3
        movq    %r8, 64(z)
        mulpadd 6, 3
        mulpadd 7, 3
        movq    56(x), %rdx
        mulpadd 4, 7
        mulpade 5, 7
        mulpade 6, 7
        adcxq   zero, %r14

// Double and add things; use z[1]..z[8] and thereafter the registers
// %r9..%r15 which haven't been written back yet

        xorl    zeroe, zeroe
        movq    (x), %rdx
        mulxq   %rdx, %rax, %rcx
        movq    %rax, (z)
        movq    8(z), %rax
        adcxq   %rax, %rax
        adoxq   %rcx, %rax
        movq    %rax, 8(z)
        movq    16(z), %rax
        movq    8(x), %rdx
        mulxq   %rdx, %rdx, %rcx
        adcxq   %rax, %rax
        adoxq   %rdx, %rax
        movq    %rax, 16(z)
        movq    24(z), %rax
        adcxq   %rax, %rax
        adoxq   %rcx, %rax
        movq    %rax, 24(z)
        movq    32(z), %rax
        movq    16(x), %rdx
        mulxq   %rdx, %rdx, %rcx
        adcxq   %rax, %rax
        adoxq   %rdx, %rax
        movq    %rax, 32(z)
        movq    40(z), %rax
        adcxq   %rax, %rax
        adoxq   %rcx, %rax
        movq    %rax, 40(z)
        movq    48(z), %rax
        movq    24(x), %rdx
        mulxq   %rdx, %rdx, %rcx
        adcxq   %rax, %rax
        adoxq   %rdx, %rax
        movq    %rax, 48(z)
        movq    56(z), %rax
        adcxq   %rax, %rax
        adoxq   %rcx, %rax
        movq    %rax, 56(z)
        movq    64(z), %rax
        movq    32(x), %rdx
        mulxq   %rdx, %rdx, %rcx
        adcxq   %rax, %rax
        adoxq   %rdx, %rax
        movq    %rax, 64(z)
        adcxq   %r9, %r9
        adoxq   %rcx, %r9
        movq    %r9, 72(z)
        movq    40(x), %rdx
        mulxq   %rdx, %rdx, %rcx
        adcxq   %r10, %r10
        adoxq   %rdx, %r10
        movq    %r10, 80(z)
        adcxq   %r11, %r11
        adoxq   %rcx, %r11
        movq    %r11, 88(z)
        movq    48(x), %rdx
        mulxq   %rdx, %rdx, %rcx
        adcxq   %r12, %r12
        adoxq   %rdx, %r12
        movq    %r12, 96(z)
        adcxq   %r13, %r13
        adoxq   %rcx, %r13
        movq    %r13, 104(z)
        movq    56(x), %rdx
        mulxq   %rdx, %rdx, %r15
        adcxq   %r14, %r14
        adoxq   %rdx, %r14
        movq    %r14, 112(z)
        adcxq   zero, %r15
        adoxq   zero, %r15
        movq    %r15, 120(z)

.endm

S2N_BN_SYMBOL(bignum_sqr_8_16):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Save more registers to play with

        CFI_PUSH(%rbp)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)
        CFI_PUSH(%r14)
        CFI_PUSH(%r15)

// Do the multiplication

        diagonals

// Real epilog

        CFI_POP(%r15)
        CFI_POP(%r14)
        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbp)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sqr_8_16)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
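The mulpade variant differs from mulpadd only in that the product's high half becomes the new top window word outright, absorbing the pending OF carry. A rough C model under the same naming assumption as before:

#include <stdint.h>

/* Model of mulpade: like mulpadd, but the high product half *creates* the
   new top window word, absorbing the pending OF carry; the high half of a
   64x64 product is at most 2^64 - 2, so adding one carry bit cannot wrap. */
static inline void mulpade_model(uint64_t *w0, uint64_t *newtop,
                                 uint64_t *cf, uint64_t *of,
                                 uint64_t a, uint64_t b) {
    unsigned __int128 p = (unsigned __int128)a * b;
    unsigned __int128 t = (unsigned __int128)*w0 + (uint64_t)p + *cf;
    *w0 = (uint64_t)t; *cf = (uint64_t)(t >> 64);  /* CF chain continues */
    *newtop = (uint64_t)(p >> 64) + *of;           /* fresh top word */
    *of = 0;                                       /* OF chain re-armed */
}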
wlsfx/bnbb
11,973
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_kmul_16_32.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[16], y[16]; output z[32]; temporary buffer t[>=32] // // extern void bignum_kmul_16_32(uint64_t z[static 32], // const uint64_t x[static 16], // const uint64_t y[static 16], // uint64_t t[static 32]); // // In this x86 code the final temporary space argument t is unused, but // it is retained in the prototype above for API consistency with ARM. // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y, RCX = t // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y, R9 = t // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_kmul_16_32) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_kmul_16_32) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_kmul_16_32) .text // These parameters are kept where they come in #define z %rdi #define x %rsi // This one gets moved to free up %rdx for muls #define y %rcx // Often used for zero #define zero %rbp #define zeroe %ebp // mulpadd i, j adds x[i] * rdx (now assumed = y[j]) into the window at i+j .macro mulpadd arg1,arg2 mulxq 8*\arg1(x), %rax, %rbx .if ((\arg1 + \arg2) % 8 == 0) adcxq %rax, %r8 adoxq %rbx, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) adcxq %rax, %r9 adoxq %rbx, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) adcxq %rax, %r10 adoxq %rbx, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) adcxq %rax, %r11 adoxq %rbx, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) adcxq %rax, %r12 adoxq %rbx, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) adcxq %rax, %r13 adoxq %rbx, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) adcxq %rax, %r14 adoxq %rbx, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) adcxq %rax, %r15 adoxq %rbx, %r8 .endif .endm // mulpade i, j adds x[i] * rdx (now assumed = y[j]) into the window at i+j // but re-creates the top word assuming nothing to add there .macro mulpade arg1,arg2 .if ((\arg1 + \arg2) % 8 == 0) mulxq 8*\arg1(x), %rax, %r9 adcxq %rax, %r8 adoxq zero, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) mulxq 8*\arg1(x), %rax, %r10 adcxq %rax, %r9 adoxq zero, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) mulxq 8*\arg1(x), %rax, %r11 adcxq %rax, %r10 adoxq zero, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) mulxq 8*\arg1(x), %rax, %r12 adcxq %rax, %r11 adoxq zero, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) mulxq 8*\arg1(x), %rax, %r13 adcxq %rax, %r12 adoxq zero, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) mulxq 8*\arg1(x), %rax, %r14 adcxq %rax, %r13 adoxq zero, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) mulxq 8*\arg1(x), %rax, %r15 adcxq %rax, %r14 adoxq zero, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) mulxq 8*\arg1(x), %rax, %r8 adcxq %rax, %r15 adoxq zero, %r8 .endif .endm // addrow i adds z[i] + x[0..7] * y[i] into the window .macro addrow arg1 movq 8*\arg1(y), %rdx xorl zeroe, zeroe .if (\arg1 % 8 == 0) adoxq 8*\arg1(z), %r8 .elseif (\arg1 % 8 == 1) adoxq 8*\arg1(z), %r9 .elseif (\arg1 % 8 == 2) adoxq 8*\arg1(z), %r10 .elseif (\arg1 % 8 == 3) adoxq 8*\arg1(z), %r11 .elseif (\arg1 % 8 == 4) adoxq 8*\arg1(z), %r12 .elseif (\arg1 % 8 == 5) adoxq 8*\arg1(z), %r13 .elseif (\arg1 % 8 == 6) adoxq 8*\arg1(z), %r14 .elseif (\arg1 % 8 == 7) adoxq 8*\arg1(z), %r15 .endif mulpadd 0, \arg1 .if (\arg1 % 8 == 0) movq %r8, 8*\arg1(z) .elseif (\arg1 % 8 == 1) movq %r9, 8*\arg1(z) .elseif (\arg1 % 8 == 2) movq %r10, 8*\arg1(z) .elseif (\arg1 % 8 == 3) movq %r11, 8*\arg1(z) .elseif (\arg1 % 8 == 4) movq %r12, 
8*\arg1(z) .elseif (\arg1 % 8 == 5) movq %r13, 8*\arg1(z) .elseif (\arg1 % 8 == 6) movq %r14, 8*\arg1(z) .elseif (\arg1 % 8 == 7) movq %r15, 8*\arg1(z) .endif mulpadd 1, \arg1 mulpadd 2, \arg1 mulpadd 3, \arg1 mulpadd 4, \arg1 mulpadd 5, \arg1 mulpadd 6, \arg1 mulpade 7, \arg1 .if (\arg1 % 8 == 0) adcq zero, %r8 .elseif (\arg1 % 8 == 1) adcq zero, %r9 .elseif (\arg1 % 8 == 2) adcq zero, %r10 .elseif (\arg1 % 8 == 3) adcq zero, %r11 .elseif (\arg1 % 8 == 4) adcq zero, %r12 .elseif (\arg1 % 8 == 5) adcq zero, %r13 .elseif (\arg1 % 8 == 6) adcq zero, %r14 .elseif (\arg1 % 8 == 7) adcq zero, %r15 .endif .endm // Special zero version of addrow, setting up the window from scratch .macro addrowz movq (y), %rdx xorl zeroe, zeroe mulxq (x), %rax, %r9 adcq %rax, (z) mulxq 8(x), %rax, %r10 adcq %rax, %r9 mulxq 16(x), %rax, %r11 adcq %rax, %r10 mulxq 24(x), %rax, %r12 adcq %rax, %r11 mulxq 32(x), %rax, %r13 adcq %rax, %r12 mulxq 40(x), %rax, %r14 adcq %rax, %r13 mulxq 48(x), %rax, %r15 adcq %rax, %r14 mulxq 56(x), %rax, %r8 adcq %rax, %r15 adcq zero, %r8 .endm // This is a variant where we add the initial z[0..7] at the outset. // This makes the initialization process a bit less wasteful. By doing // a block of 8 we get the same effect except that we add z[0..7] // // adurow i adds 2^{7*64} * z[i+7] + x[0..7] * y[i] into the window .macro adurow arg1 movq 8*\arg1(y), %rdx xorl zeroe, zeroe mulpadd 0, \arg1 .if (\arg1 % 8 == 0) movq %r8, 8*\arg1(z) .elseif (\arg1 % 8 == 1) movq %r9, 8*\arg1(z) .elseif (\arg1 % 8 == 2) movq %r10, 8*\arg1(z) .elseif (\arg1 % 8 == 3) movq %r11, 8*\arg1(z) .elseif (\arg1 % 8 == 4) movq %r12, 8*\arg1(z) .elseif (\arg1 % 8 == 5) movq %r13, 8*\arg1(z) .elseif (\arg1 % 8 == 6) movq %r14, 8*\arg1(z) .elseif (\arg1 % 8 == 7) movq %r15, 8*\arg1(z) .endif mulpadd 1, \arg1 mulpadd 2, \arg1 mulpadd 3, \arg1 mulpadd 4, \arg1 mulpadd 5, \arg1 mulpadd 6, \arg1 mulpade 7, \arg1 .if (\arg1 % 8 == 0) adcq zero, %r8 .elseif (\arg1 % 8 == 1) adcq zero, %r9 .elseif (\arg1 % 8 == 2) adcq zero, %r10 .elseif (\arg1 % 8 == 3) adcq zero, %r11 .elseif (\arg1 % 8 == 4) adcq zero, %r12 .elseif (\arg1 % 8 == 5) adcq zero, %r13 .elseif (\arg1 % 8 == 6) adcq zero, %r14 .elseif (\arg1 % 8 == 7) adcq zero, %r15 .endif .endm // Special "adurow 0" case to do first stage .macro adurowz movq (y), %rdx xorl zeroe, zeroe movq (z), %r8 movq 8(z), %r9 mulpadd 0, 0 movq %r8, (z) movq 16(z), %r10 mulpadd 1, 0 movq 24(z), %r11 mulpadd 2, 0 movq 32(z), %r12 mulpadd 3, 0 movq 40(z), %r13 mulpadd 4, 0 movq 48(z), %r14 mulpadd 5, 0 movq 56(z), %r15 mulpadd 6, 0 mulxq 56(x), %rax, %r8 adcxq %rax, %r15 adoxq zero, %r8 adcxq zero, %r8 .endm // Multiply-add: z := z + x[0..7] * y .macro addrows adurowz adurow 1 adurow 2 adurow 3 adurow 4 adurow 5 adurow 6 adurow 7 addrow 8 addrow 9 addrow 10 addrow 11 addrow 12 addrow 13 addrow 14 addrow 15 movq %r8, 128(z) movq %r9, 136(z) movq %r10, 144(z) movq %r11, 152(z) movq %r12, 160(z) movq %r13, 168(z) movq %r14, 176(z) movq %r15, 184(z) .endm // mulrow i adds x[0..7] * y[i] into the window // just like addrow but no addition of z[i] .macro mulrow arg1 movq 8*\arg1(y), %rdx xorl zeroe, zeroe mulpadd 0, \arg1 .if (\arg1 % 8 == 0) movq %r8, 8*\arg1(z) .elseif (\arg1 % 8 == 1) movq %r9, 8*\arg1(z) .elseif (\arg1 % 8 == 2) movq %r10, 8*\arg1(z) .elseif (\arg1 % 8 == 3) movq %r11, 8*\arg1(z) .elseif (\arg1 % 8 == 4) movq %r12, 8*\arg1(z) .elseif (\arg1 % 8 == 5) movq %r13, 8*\arg1(z) .elseif (\arg1 % 8 == 6) movq %r14, 8*\arg1(z) .elseif (\arg1 % 8 == 7) movq %r15, 8*\arg1(z) .endif 
mulpadd 1, \arg1 mulpadd 2, \arg1 mulpadd 3, \arg1 mulpadd 4, \arg1 mulpadd 5, \arg1 mulpadd 6, \arg1 mulpade 7, \arg1 .if (\arg1 % 8 == 0) adcq zero, %r8 .elseif (\arg1 % 8 == 1) adcq zero, %r9 .elseif (\arg1 % 8 == 2) adcq zero, %r10 .elseif (\arg1 % 8 == 3) adcq zero, %r11 .elseif (\arg1 % 8 == 4) adcq zero, %r12 .elseif (\arg1 % 8 == 5) adcq zero, %r13 .elseif (\arg1 % 8 == 6) adcq zero, %r14 .elseif (\arg1 % 8 == 7) adcq zero, %r15 .endif .endm // Special zero version of mulrow, setting up the window from scratch .macro mulrowz movq (y), %rdx xorl zeroe, zeroe mulxq (x), %rax, %r9 movq %rax, (z) mulxq 8(x), %rax, %r10 adcxq %rax, %r9 mulxq 16(x), %rax, %r11 adcxq %rax, %r10 mulxq 24(x), %rax, %r12 adcxq %rax, %r11 mulxq 32(x), %rax, %r13 adcxq %rax, %r12 mulxq 40(x), %rax, %r14 adcxq %rax, %r13 mulxq 48(x), %rax, %r15 adcxq %rax, %r14 mulxq 56(x), %rax, %r8 adcxq %rax, %r15 adcq zero, %r8 .endm // Multiply-add: z := x[0..7] * y plus window .macro mulrows mulrowz mulrow 1 mulrow 2 mulrow 3 mulrow 4 mulrow 5 mulrow 6 mulrow 7 mulrow 8 mulrow 9 mulrow 10 mulrow 11 mulrow 12 mulrow 13 mulrow 14 mulrow 15 movq %r8, 128(z) movq %r9, 136(z) movq %r10, 144(z) movq %r11, 152(z) movq %r12, 160(z) movq %r13, 168(z) movq %r14, 176(z) movq %r15, 184(z) .endm S2N_BN_SYMBOL(bignum_kmul_16_32): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save more registers to play with CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Move y into its permanent home, freeing up %rdx for its special role in muls movq %rdx, y // Do the zeroth row as a pure product then the next as multiply-add mulrows addq $64, z addq $64, x addrows // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_kmul_16_32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
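Strategically the function is just two 8-row passes: mulrows writes x_lo * y into z, then z and x advance by 8 words and addrows accumulates x_hi * y on top, so z = x_lo*y + 2^512 * x_hi*y = x*y. A hedged reference model in C (helper names are assumptions, not part of the source):

#include <stdint.h>

/* z[0 .. ny+7] += x8[0..7] * y[0..ny-1], simple schoolbook rows. */
static void rows_8xN(uint64_t *z, const uint64_t x8[8],
                     const uint64_t *y, int ny) {
    for (int j = 0; j < ny; j++) {
        uint64_t carry = 0;
        for (int i = 0; i < 8; i++) {
            unsigned __int128 t =
                (unsigned __int128)x8[i] * y[j] + z[i + j] + carry;
            z[i + j] = (uint64_t)t;
            carry = (uint64_t)(t >> 64);
        }
        for (int k = j + 8; carry; k++) {   /* flush the row's top carry */
            unsigned __int128 t = (unsigned __int128)z[k] + carry;
            z[k] = (uint64_t)t;
            carry = (uint64_t)(t >> 64);
        }
    }
}

/* z = x * y for 16-word x, y: low-half rows, then high-half rows
   shifted up by 8 words, the same split the assembly uses. */
static void kmul_16_32_model(uint64_t z[32], const uint64_t x[16],
                             const uint64_t y[16]) {
    for (int k = 0; k < 32; k++) z[k] = 0;
    rows_8xN(z, x, y, 16);                  /* like mulrows */
    rows_8xN(z + 8, x + 8, y, 16);          /* like addrows, shifted */
}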
wlsfx/bnbb
4,942
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_mul_6_12.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply z := x * y
// Inputs x[6], y[6]; output z[12]
//
//    extern void bignum_mul_6_12(uint64_t z[static 12], const uint64_t x[static 6],
//                                const uint64_t y[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI:   RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_6_12)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_6_12)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_6_12)
        .text

// These are actually right

#define z %rdi
#define x %rsi

// Copied in or set up

#define y %rcx

// A zero register

#define zero %rbp
#define zeroe %ebp

// Add in x[i] * %rdx to the (i,i+1) position with the register window
// Would be nice to have conditional expressions reg[i], reg[i+1] ...

.macro mulpadd arg1,arg2
        mulxq   8*\arg2(x), %rax, %rbx
.if ((\arg1 + \arg2) % 6 == 0)
        adcxq   %rax, %r8
        adoxq   %rbx, %r9
.elseif ((\arg1 + \arg2) % 6 == 1)
        adcxq   %rax, %r9
        adoxq   %rbx, %r10
.elseif ((\arg1 + \arg2) % 6 == 2)
        adcxq   %rax, %r10
        adoxq   %rbx, %r11
.elseif ((\arg1 + \arg2) % 6 == 3)
        adcxq   %rax, %r11
        adoxq   %rbx, %r12
.elseif ((\arg1 + \arg2) % 6 == 4)
        adcxq   %rax, %r12
        adoxq   %rbx, %r13
.elseif ((\arg1 + \arg2) % 6 == 5)
        adcxq   %rax, %r13
        adoxq   %rbx, %r8
.endif
.endm

// Add in the whole j'th row

.macro addrow arg1
        movq    8*\arg1(y), %rdx
        xorl    zeroe, zeroe
        mulpadd \arg1, 0
.if (\arg1 % 6 == 0)
        movq    %r8, 8*\arg1(z)
.elseif (\arg1 % 6 == 1)
        movq    %r9, 8*\arg1(z)
.elseif (\arg1 % 6 == 2)
        movq    %r10, 8*\arg1(z)
.elseif (\arg1 % 6 == 3)
        movq    %r11, 8*\arg1(z)
.elseif (\arg1 % 6 == 4)
        movq    %r12, 8*\arg1(z)
.elseif (\arg1 % 6 == 5)
        movq    %r13, 8*\arg1(z)
.endif
        mulpadd \arg1, 1
        mulpadd \arg1, 2
        mulpadd \arg1, 3
        mulpadd \arg1, 4
.if (\arg1 % 6 == 0)
        mulxq   40(x), %rax, %r8
        adcxq   %rax, %r13
        adoxq   zero, %r8
        adcxq   zero, %r8
.elseif (\arg1 % 6 == 1)
        mulxq   40(x), %rax, %r9
        adcxq   %rax, %r8
        adoxq   zero, %r9
        adcxq   zero, %r9
.elseif (\arg1 % 6 == 2)
        mulxq   40(x), %rax, %r10
        adcxq   %rax, %r9
        adoxq   zero, %r10
        adcxq   zero, %r10
.elseif (\arg1 % 6 == 3)
        mulxq   40(x), %rax, %r11
        adcxq   %rax, %r10
        adoxq   zero, %r11
        adcxq   zero, %r11
.elseif (\arg1 % 6 == 4)
        mulxq   40(x), %rax, %r12
        adcxq   %rax, %r11
        adoxq   zero, %r12
        adcxq   zero, %r12
.elseif (\arg1 % 6 == 5)
        mulxq   40(x), %rax, %r13
        adcxq   %rax, %r12
        adoxq   zero, %r13
        adcxq   zero, %r13
.endif
.endm

S2N_BN_SYMBOL(bignum_mul_6_12):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

// Save more registers to play with

        CFI_PUSH(%rbp)
        CFI_PUSH(%rbx)
        CFI_PUSH(%r12)
        CFI_PUSH(%r13)

// Copy y into a safe register to start with

        movq    %rdx, y

// Zero a register, which also makes sure we don't get a fake carry-in

        xorl    zeroe, zeroe

// Do the zeroth row, which is a bit different
// Write back the zero-zero product and then accumulate
// %r8,%r13,%r12,%r11,%r10,%r9 as y[0] * x from 1..6

        movq    (y), %rdx
        mulxq   (x), %r8, %r9
        movq    %r8, (z)
        mulxq   8(x), %rbx, %r10
        adcxq   %rbx, %r9
        mulxq   16(x), %rbx, %r11
        adcxq   %rbx, %r10
        mulxq   24(x), %rbx, %r12
        adcxq   %rbx, %r11
        mulxq   32(x), %rbx, %r13
        adcxq   %rbx, %r12
        mulxq   40(x), %rbx, %r8
        adcxq   %rbx, %r13
        adcxq   zero, %r8

// Now all the other rows in a uniform pattern

        addrow  1
        addrow  2
        addrow  3
        addrow  4
        addrow  5

// Now write back the additional columns

        movq    %r8, 48(z)
        movq    %r9, 56(z)
        movq    %r10, 64(z)
        movq    %r11, 72(z)
        movq    %r12, 80(z)
        movq    %r13, 88(z)

// Restore registers and return

        CFI_POP(%r13)
        CFI_POP(%r12)
        CFI_POP(%rbx)
        CFI_POP(%rbp)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mul_6_12)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
3,403
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_sqr_4_8_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Square, z := x^2
// Input x[4]; output z[8]
//
//    extern void bignum_sqr_4_8_alt(uint64_t z[static 8],
//                                   const uint64_t x[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_4_8_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_4_8_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_4_8_alt)
        .text

// Input arguments

#define z %rdi
#define x %rsi

// Other variables used as a rotating 3-word window to add terms to

#define t0 %rcx
#define t1 %r8
#define t2 %r9

// Macro for the key "multiply and add to (c,h,l)" step, for square term

#define combadd1(c,h,l,numa)                    \
        movq    numa, %rax ;                    \
        mulq    %rax;                           \
        addq    %rax, l ;                       \
        adcq    %rdx, h ;                       \
        adcq    $0, c

// A short form where we don't expect a top carry

#define combads(h,l,numa)                       \
        movq    numa, %rax ;                    \
        mulq    %rax;                           \
        addq    %rax, l ;                       \
        adcq    %rdx, h

// A version doubling before adding, for non-square terms

#define combadd2(c,h,l,numa,numb)               \
        movq    numa, %rax ;                    \
        mulq    numb;                           \
        addq    %rax, %rax ;                    \
        adcq    %rdx, %rdx ;                    \
        adcq    $0, c ;                         \
        addq    %rax, l ;                       \
        adcq    %rdx, h ;                       \
        adcq    $0, c

S2N_BN_SYMBOL(bignum_sqr_4_8_alt):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Result term 0

        movq    (x), %rax
        mulq    %rax
        movq    %rax, (z)
        movq    %rdx, t0
        xorq    t1, t1

// Result term 1

        xorq    t2, t2
        combadd2(t2,t1,t0,(x),8(x))
        movq    t0, 8(z)

// Result term 2

        xorq    t0, t0
        combadd1(t0,t2,t1,8(x))
        combadd2(t0,t2,t1,(x),16(x))
        movq    t1, 16(z)

// Result term 3

        xorq    t1, t1
        combadd2(t1,t0,t2,(x),24(x))
        combadd2(t1,t0,t2,8(x),16(x))
        movq    t2, 24(z)

// Result term 4

        xorq    t2, t2
        combadd2(t2,t1,t0,8(x),24(x))
        combadd1(t2,t1,t0,16(x))
        movq    t0, 32(z)

// Result term 5

        xorq    t0, t0
        combadd2(t0,t2,t1,16(x),24(x))
        movq    t1, 40(z)

// Result term 6

        xorq    t1, t1
        combads(t0,t2,24(x))
        movq    t2, 48(z)

// Result term 7

        movq    t0, 56(z)

// Return

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sqr_4_8_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
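Because the _alt form targets machines without ADCX/ADOX, every non-square term is doubled immediately after its mulq, as the combadd2 macro shows. A C model of that macro, with names assumed for illustration:

#include <stdint.h>

/* Model of combadd2: (c,h,l) += 2 * a * b.  The product is doubled first,
   and the bit shifted out of its top is caught in the third window word c,
   which is what the pair of adcq $0, c instructions accomplishes. */
static inline void combadd2_model(uint64_t *c, uint64_t *h, uint64_t *l,
                                  uint64_t a, uint64_t b) {
    unsigned __int128 p = (unsigned __int128)a * b;
    uint64_t top = (uint64_t)(p >> 127);      /* carry out of the doubling */
    p <<= 1;
    unsigned __int128 t = (unsigned __int128)*l + (uint64_t)p;
    *l = (uint64_t)t;
    t = (unsigned __int128)*h + (uint64_t)(p >> 64) + (uint64_t)(t >> 64);
    *h = (uint64_t)t;
    *c += top + (uint64_t)(t >> 64);          /* both stray carries land in c */
}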
wlsfx/bnbb
5,235
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_sqr_6_12_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Square, z := x^2
// Input x[6]; output z[12]
//
//    extern void bignum_sqr_6_12_alt(uint64_t z[static 12],
//                                    const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_6_12_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_6_12_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_6_12_alt)
        .text

// Input arguments

#define z %rdi
#define x %rsi

// Other variables used as a rotating 3-word window to add terms to

#define t0 %r8
#define t1 %r9
#define t2 %r10

// Additional temporaries for local windows to share doublings

#define u0 %rcx
#define u1 %r11

// Macro for the key "multiply and add to (c,h,l)" step

#define combadd(c,h,l,numa,numb)                \
        movq    numa, %rax ;                    \
        mulq    numb;                           \
        addq    %rax, l ;                       \
        adcq    %rdx, h ;                       \
        adcq    $0, c

// Set up initial window (c,h,l) = numa * numb

#define combaddz(c,h,l,numa,numb)               \
        movq    numa, %rax ;                    \
        mulq    numb;                           \
        xorq    c, c ;                          \
        movq    %rax, l ;                       \
        movq    %rdx, h

// Doubling step (c,h,l) = 2 * (c,hh,ll) + (0,h,l)

#define doubladd(c,h,l,hh,ll)                   \
        addq    ll, ll ;                        \
        adcq    hh, hh ;                        \
        adcq    c, c ;                          \
        addq    ll, l ;                         \
        adcq    hh, h ;                         \
        adcq    $0, c

// Square term incorporation (c,h,l) += numa^2

#define combadd1(c,h,l,numa)                    \
        movq    numa, %rax ;                    \
        mulq    %rax;                           \
        addq    %rax, l ;                       \
        adcq    %rdx, h ;                       \
        adcq    $0, c

// A short form where we don't expect a top carry

#define combads(h,l,numa)                       \
        movq    numa, %rax ;                    \
        mulq    %rax;                           \
        addq    %rax, l ;                       \
        adcq    %rdx, h

// A version doubling directly before adding, for single non-square terms

#define combadd2(c,h,l,numa,numb)               \
        movq    numa, %rax ;                    \
        mulq    numb;                           \
        addq    %rax, %rax ;                    \
        adcq    %rdx, %rdx ;                    \
        adcq    $0, c ;                         \
        addq    %rax, l ;                       \
        adcq    %rdx, h ;                       \
        adcq    $0, c

S2N_BN_SYMBOL(bignum_sqr_6_12_alt):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Result term 0

        movq    (x), %rax
        mulq    %rax
        movq    %rax, (z)
        movq    %rdx, t0
        xorq    t1, t1

// Result term 1

        xorq    t2, t2
        combadd2(t2,t1,t0,(x),8(x))
        movq    t0, 8(z)

// Result term 2

        xorq    t0, t0
        combadd1(t0,t2,t1,8(x))
        combadd2(t0,t2,t1,(x),16(x))
        movq    t1, 16(z)

// Result term 3

        combaddz(t1,u1,u0,(x),24(x))
        combadd(t1,u1,u0,8(x),16(x))
        doubladd(t1,t0,t2,u1,u0)
        movq    t2, 24(z)

// Result term 4

        combaddz(t2,u1,u0,(x),32(x))
        combadd(t2,u1,u0,8(x),24(x))
        doubladd(t2,t1,t0,u1,u0)
        combadd1(t2,t1,t0,16(x))
        movq    t0, 32(z)

// Result term 5

        combaddz(t0,u1,u0,(x),40(x))
        combadd(t0,u1,u0,8(x),32(x))
        combadd(t0,u1,u0,16(x),24(x))
        doubladd(t0,t2,t1,u1,u0)
        movq    t1, 40(z)

// Result term 6

        combaddz(t1,u1,u0,8(x),40(x))
        combadd(t1,u1,u0,16(x),32(x))
        doubladd(t1,t0,t2,u1,u0)
        combadd1(t1,t0,t2,24(x))
        movq    t2, 48(z)

// Result term 7

        combaddz(t2,u1,u0,16(x),40(x))
        combadd(t2,u1,u0,24(x),32(x))
        doubladd(t2,t1,t0,u1,u0)
        movq    t0, 56(z)

// Result term 8

        xorq    t0, t0
        combadd2(t0,t2,t1,24(x),40(x))
        combadd1(t0,t2,t1,32(x))
        movq    t1, 64(z)

// Result term 9

        xorq    t1, t1
        combadd2(t1,t0,t2,32(x),40(x))
        movq    t2, 72(z)

// Result term 10

        combads(t1,t0,40(x))
        movq    t0, 80(z)

// Result term 11

        movq    t1, 88(z)

// Return

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sqr_6_12_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
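The extra u0/u1 window exists so that several cross products can share one doubling: combaddz/combadd sum them into (c,u1,u0), then doubladd merges twice that local window into the main rotating window. Roughly, in C (model only, not the implementation):

#include <stdint.h>

/* Model of doubladd: (c,h,l) = 2*(c,hh,ll) + (0,h,l), i.e. the shared
   local window is doubled as it is merged into the main rotating window. */
static inline void doubladd_model(uint64_t *c, uint64_t *h, uint64_t *l,
                                  uint64_t hh, uint64_t ll) {
    unsigned __int128 t = (unsigned __int128)ll << 1;       /* 2*ll */
    uint64_t l2 = (uint64_t)t, k = (uint64_t)(t >> 64);
    t = ((unsigned __int128)hh << 1) + k;                   /* 2*hh + carry */
    uint64_t h2 = (uint64_t)t;
    uint64_t c2 = 2 * *c + (uint64_t)(t >> 64);             /* adcq c, c */
    t = (unsigned __int128)*l + l2;
    *l = (uint64_t)t;
    t = (unsigned __int128)*h + h2 + (uint64_t)(t >> 64);
    *h = (uint64_t)t;
    *c = c2 + (uint64_t)(t >> 64);
}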
wlsfx/bnbb
6,320
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_mul_8_16.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[8], y[8]; output z[16] // // extern void bignum_mul_8_16(uint64_t z[static 16], const uint64_t x[static 8], // const uint64_t y[static 8]); // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_8_16) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_8_16) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_8_16) .text // These are actually right #define z %rdi #define x %rsi // Copied in or set up #define y %rcx // A zero register #define zero %rbp #define zeroe %ebp // mulpadd i, j adds x[i] * rdx (now assumed = y[j]) into the window at i+j .macro mulpadd arg1,arg2 mulxq 8*\arg1(x), %rax, %rbx .if ((\arg1 + \arg2) % 8 == 0) adcxq %rax, %r8 adoxq %rbx, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) adcxq %rax, %r9 adoxq %rbx, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) adcxq %rax, %r10 adoxq %rbx, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) adcxq %rax, %r11 adoxq %rbx, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) adcxq %rax, %r12 adoxq %rbx, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) adcxq %rax, %r13 adoxq %rbx, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) adcxq %rax, %r14 adoxq %rbx, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) adcxq %rax, %r15 adoxq %rbx, %r8 .endif .endm // mulpade i, j adds x[i] * rdx (now assumed = y[j]) into the window at i+j // but re-creates the top word assuming nothing to add there .macro mulpade arg1,arg2 .if ((\arg1 + \arg2) % 8 == 0) mulxq 8*\arg1(x), %rax, %r9 adcxq %rax, %r8 adoxq zero, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) mulxq 8*\arg1(x), %rax, %r10 adcxq %rax, %r9 adoxq zero, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) mulxq 8*\arg1(x), %rax, %r11 adcxq %rax, %r10 adoxq zero, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) mulxq 8*\arg1(x), %rax, %r12 adcxq %rax, %r11 adoxq zero, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) mulxq 8*\arg1(x), %rax, %r13 adcxq %rax, %r12 adoxq zero, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) mulxq 8*\arg1(x), %rax, %r14 adcxq %rax, %r13 adoxq zero, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) mulxq 8*\arg1(x), %rax, %r15 adcxq %rax, %r14 adoxq zero, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) mulxq 8*\arg1(x), %rax, %r8 adcxq %rax, %r15 adoxq zero, %r8 .endif .endm // Add in the whole j'th row .macro addrow arg1 movq 8*\arg1(y), %rdx xorl zeroe, zeroe mulpadd 0, \arg1 .if (\arg1 % 8 == 0) movq %r8, 8*\arg1(z) .elseif (\arg1 % 8 == 1) movq %r9, 8*\arg1(z) .elseif (\arg1 % 8 == 2) movq %r10, 8*\arg1(z) .elseif (\arg1 % 8 == 3) movq %r11, 8*\arg1(z) .elseif (\arg1 % 8 == 4) movq %r12, 8*\arg1(z) .elseif (\arg1 % 8 == 5) movq %r13, 8*\arg1(z) .elseif (\arg1 % 8 == 6) movq %r14, 8*\arg1(z) .elseif (\arg1 % 8 == 7) movq %r15, 8*\arg1(z) .endif mulpadd 1, \arg1 mulpadd 2, \arg1 mulpadd 3, \arg1 mulpadd 4, \arg1 mulpadd 5, \arg1 mulpadd 6, \arg1 mulpade 7, \arg1 .if (\arg1 % 8 == 0) adcq zero, %r8 .elseif (\arg1 % 8 == 1) adcq zero, %r9 .elseif (\arg1 % 8 == 2) adcq zero, %r10 .elseif (\arg1 % 8 == 3) adcq zero, %r11 .elseif (\arg1 % 8 == 4) adcq zero, %r12 .elseif (\arg1 % 8 == 5) adcq zero, %r13 .elseif (\arg1 % 8 == 6) adcq zero, %r14 .elseif (\arg1 % 8 == 7) adcq zero, %r15 .endif .endm S2N_BN_SYMBOL(bignum_mul_8_16): CFI_START _CET_ENDBR #if WINDOWS_ABI 
CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save more registers to play with CFI_PUSH(%rbp) CFI_PUSH(%rbx) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Copy y into a safe register to start with movq %rdx, y // Zero a register, which also makes sure we don't get a fake carry-in xorl zeroe, zeroe // Do the zeroth row, which is a bit different // Write back the zero-zero product and then accumulate // %r8,%r15,%r14,%r13,%r12,%r11,%r10,%r9 as y[0] * x from 1..8 movq (y), %rdx mulxq (x), %r8, %r9 movq %r8, (z) mulxq 8(x), %rbx, %r10 adcq %rbx, %r9 mulxq 16(x), %rbx, %r11 adcq %rbx, %r10 mulxq 24(x), %rbx, %r12 adcq %rbx, %r11 mulxq 32(x), %rbx, %r13 adcq %rbx, %r12 mulxq 40(x), %rbx, %r14 adcq %rbx, %r13 mulxq 48(x), %rbx, %r15 adcq %rbx, %r14 mulxq 56(x), %rbx, %r8 adcq %rbx, %r15 adcq zero, %r8 // Now all the other rows in a uniform pattern addrow 1 addrow 2 addrow 3 addrow 4 addrow 5 addrow 6 addrow 7 // Now write back the additional columns movq %r8, 64(z) movq %r9, 72(z) movq %r10, 80(z) movq %r11, 88(z) movq %r12, 96(z) movq %r13, 104(z) movq %r14, 112(z) movq %r15, 120(z) // Real epilog CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbx) CFI_POP(%rbp) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_8_16) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
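A hypothetical caller, relying only on the prototype documented in the header comment; the test values and expected output below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

extern void bignum_mul_8_16(uint64_t z[static 16], const uint64_t x[static 8],
                            const uint64_t y[static 8]);

int main(void) {
    /* Little-endian word arrays: x = 2^64 + 2 and y = 3 as 512-bit values,
       so x * y = 3*2^64 + 6, i.e. z[0] = 6, z[1] = 3, rest zero. */
    uint64_t x[8] = {2, 1, 0, 0, 0, 0, 0, 0};
    uint64_t y[8] = {3, 0, 0, 0, 0, 0, 0, 0};
    uint64_t z[16];
    bignum_mul_8_16(z, x, y);               /* z := x * y (1024-bit result) */
    for (int i = 15; i >= 0; i--)           /* print most significant first */
        printf("%016llx", (unsigned long long)z[i]);
    putchar('\n');
    return 0;
}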
wlsfx/bnbb
3,993
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_mul_4_8.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply z := x * y
// Inputs x[4], y[4]; output z[8]
//
//    extern void bignum_mul_4_8(uint64_t z[static 8], const uint64_t x[static 4],
//                               const uint64_t y[static 4]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI:   RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_4_8)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_4_8)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_4_8)
        .text

// These are actually right

#define z %rdi
#define x %rsi

// Copied in or set up

#define y %rcx

// A zero register

#define zero %rbp
#define zeroe %ebp

// Add in x[i] * %rdx to the (i,i+1) position with the register window
// Would be nice to have conditional expressions reg[i], reg[i+1] ...

.macro mulpadd arg1,arg2
        mulxq   8*\arg2(x), %rax, %rbx
.if ((\arg1 + \arg2) % 4 == 0)
        adcxq   %rax, %r8
        adoxq   %rbx, %r9
.elseif ((\arg1 + \arg2) % 4 == 1)
        adcxq   %rax, %r9
        adoxq   %rbx, %r10
.elseif ((\arg1 + \arg2) % 4 == 2)
        adcxq   %rax, %r10
        adoxq   %rbx, %r11
.elseif ((\arg1 + \arg2) % 4 == 3)
        adcxq   %rax, %r11
        adoxq   %rbx, %r8
.endif
.endm

// Add in the whole j'th row

.macro addrow arg1
        movq    8*\arg1(y), %rdx
        xorl    zeroe, zeroe
        mulpadd \arg1, 0
.if (\arg1 % 4 == 0)
        movq    %r8, 8*\arg1(z)
.elseif (\arg1 % 4 == 1)
        movq    %r9, 8*\arg1(z)
.elseif (\arg1 % 4 == 2)
        movq    %r10, 8*\arg1(z)
.elseif (\arg1 % 4 == 3)
        movq    %r11, 8*\arg1(z)
.endif
        mulpadd \arg1, 1
        mulpadd \arg1, 2
.if (\arg1 % 4 == 0)
        mulxq   24(x), %rax, %r8
        adcxq   %rax, %r11
        adoxq   zero, %r8
        adcxq   zero, %r8
.elseif (\arg1 % 4 == 1)
        mulxq   24(x), %rax, %r9
        adcxq   %rax, %r8
        adoxq   zero, %r9
        adcxq   zero, %r9
.elseif (\arg1 % 4 == 2)
        mulxq   24(x), %rax, %r10
        adcxq   %rax, %r9
        adoxq   zero, %r10
        adcxq   zero, %r10
.elseif (\arg1 % 4 == 3)
        mulxq   24(x), %rax, %r11
        adcxq   %rax, %r10
        adoxq   zero, %r11
        adcxq   zero, %r11
.endif
.endm

S2N_BN_SYMBOL(bignum_mul_4_8):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

// Save more registers to play with

        CFI_PUSH(%rbp)
        CFI_PUSH(%rbx)

// Copy y into a safe register to start with

        movq    %rdx, y

// Zero a register, which also makes sure we don't get a fake carry-in

        xorl    zeroe, zeroe

// Do the zeroth row, which is a bit different
// Write back the zero-zero product and then accumulate
// %r8,%r11,%r10,%r9 as y[0] * x from 1..4

        movq    (y), %rdx
        mulxq   (x), %r8, %r9
        movq    %r8, (z)
        mulxq   8(x), %rbx, %r10
        adcxq   %rbx, %r9
        mulxq   16(x), %rbx, %r11
        adcxq   %rbx, %r10
        mulxq   24(x), %rbx, %r8
        adcxq   %rbx, %r11
        adcxq   zero, %r8

// Now all the other rows in a uniform pattern

        addrow  1
        addrow  2
        addrow  3

// Now write back the additional columns

        movq    %r8, 32(z)
        movq    %r9, 40(z)
        movq    %r10, 48(z)
        movq    %r11, 56(z)

// Restore registers and return

        CFI_POP(%rbx)
        CFI_POP(%rbp)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mul_4_8)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
6,112
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_sqr_8_16_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square, z := x^2 // Input x[8]; output z[16] // // extern void bignum_sqr_8_16_alt(uint64_t z[static 16], // const uint64_t x[static 8]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_8_16_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_8_16_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_8_16_alt) .text // Input arguments #define z %rdi #define x %rsi // Other variables used as a rotating 3-word window to add terms to #define t0 %r8 #define t1 %r9 #define t2 %r10 // Additional temporaries for local windows to share doublings #define u0 %rcx #define u1 %r11 // Macro for the key "multiply and add to (c,h,l)" step #define combadd(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq $0, c // Set up initial window (c,h,l) = numa * numb #define combaddz(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ xorq c, c ; \ movq %rax, l ; \ movq %rdx, h // Doubling step (c,h,l) = 2 * (c,hh,ll) + (0,h,l) #define doubladd(c,h,l,hh,ll) \ addq ll, ll ; \ adcq hh, hh ; \ adcq c, c ; \ addq ll, l ; \ adcq hh, h ; \ adcq $0, c // Square term incorporation (c,h,l) += numba^2 #define combadd1(c,h,l,numa) \ movq numa, %rax ; \ mulq %rax; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq $0, c // A short form where we don't expect a top carry #define combads(h,l,numa) \ movq numa, %rax ; \ mulq %rax; \ addq %rax, l ; \ adcq %rdx, h // A version doubling directly before adding, for single non-square terms #define combadd2(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0, c ; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq $0, c S2N_BN_SYMBOL(bignum_sqr_8_16_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Result term 0 movq (x), %rax mulq %rax movq %rax, (z) movq %rdx, t0 xorq t1, t1 // Result term 1 xorq t2, t2 combadd2(t2,t1,t0,(x),8(x)) movq t0, 8(z) // Result term 2 xorq t0, t0 combadd1(t0,t2,t1,8(x)) combadd2(t0,t2,t1,(x),16(x)) movq t1, 16(z) // Result term 3 combaddz(t1,u1,u0,(x),24(x)) combadd(t1,u1,u0,8(x),16(x)) doubladd(t1,t0,t2,u1,u0) movq t2, 24(z) // Result term 4 combaddz(t2,u1,u0,(x),32(x)) combadd(t2,u1,u0,8(x),24(x)) doubladd(t2,t1,t0,u1,u0) combadd1(t2,t1,t0,16(x)) movq t0, 32(z) // Result term 5 combaddz(t0,u1,u0,(x),40(x)) combadd(t0,u1,u0,8(x),32(x)) combadd(t0,u1,u0,16(x),24(x)) doubladd(t0,t2,t1,u1,u0) movq t1, 40(z) // Result term 6 combaddz(t1,u1,u0,(x),48(x)) combadd(t1,u1,u0,8(x),40(x)) combadd(t1,u1,u0,16(x),32(x)) doubladd(t1,t0,t2,u1,u0) combadd1(t1,t0,t2,24(x)) movq t2, 48(z) // Result term 7 combaddz(t2,u1,u0,(x),56(x)) combadd(t2,u1,u0,8(x),48(x)) combadd(t2,u1,u0,16(x),40(x)) combadd(t2,u1,u0,24(x),32(x)) doubladd(t2,t1,t0,u1,u0) movq t0, 56(z) // Result term 8 combaddz(t0,u1,u0,8(x),56(x)) combadd(t0,u1,u0,16(x),48(x)) combadd(t0,u1,u0,24(x),40(x)) doubladd(t0,t2,t1,u1,u0) combadd1(t0,t2,t1,32(x)) movq t1, 64(z) // Result term 9 combaddz(t1,u1,u0,16(x),56(x)) combadd(t1,u1,u0,24(x),48(x)) combadd(t1,u1,u0,32(x),40(x)) doubladd(t1,t0,t2,u1,u0) movq t2, 72(z) // Result term 10 combaddz(t2,u1,u0,24(x),56(x)) combadd(t2,u1,u0,32(x),48(x)) 
doubladd(t2,t1,t0,u1,u0) combadd1(t2,t1,t0,40(x)) movq t0, 80(z) // Result term 11 combaddz(t0,u1,u0,32(x),56(x)) combadd(t0,u1,u0,40(x),48(x)) doubladd(t0,t2,t1,u1,u0) movq t1, 88(z) // Result term 12 xorq t1, t1 combadd2(t1,t0,t2,40(x),56(x)) combadd1(t1,t0,t2,48(x)) movq t2, 96(z) // Result term 13 xorq t2, t2 combadd2(t2,t1,t0,48(x),56(x)) movq t0, 104(z) // Result term 14 combads(t2,t1,56(x)) movq t1, 112(z) // Result term 15 movq t2, 120(z) // Return #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_sqr_8_16_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
22,660
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_ksqr_32_64.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square, z := x^2 // Input x[32]; output z[64]; temporary buffer t[>=72] // // extern void bignum_ksqr_32_64(uint64_t z[static 64], // const uint64_t x[static 32], // uint64_t t[static 72]); // // This is a Karatsuba-style function squaring half-sized results // and using temporary buffer t for intermediate results. The size of 72 // is an overstatement for compatibility with the ARM version; it actually // only uses 65 elements of t (64 + 1 for a suspended carry). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = t // Microsoft x64 ABI: RCX = z, RDX = x, R8 = t // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_ksqr_32_64) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_ksqr_32_64) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_ksqr_32_64) .text #define K 16 #define z %rdi #define x %rsi #define t %rcx S2N_BN_SYMBOL(bignum_ksqr_32_64): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save callee-preserved registers once and for all at the outset // Later we further reshuffle the input arguments to avoid extra saves CFI_PUSH(%rbp) CFI_PUSH(%rbx) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Move the temp space pointer since we need %rdx for multiplications movq %rdx, t // Square the low half CFI_CALL(Lbignum_ksqr_32_64_local_bignum_sqr_16_32) // Square the high half; from here on x and z are modified leaq 8*K(x), x // input at x+8*K leaq 16*K(z), z // result at z+16*K CFI_CALL(Lbignum_ksqr_32_64_local_bignum_sqr_16_32) // Form |x_lo - x_hi|, stored at t movq -8*K(x), %rax subq (x), %rax movq %rax, (t) .set I, 1 .rep K-1 movq -8*K+8*I(x), %rax sbbq 8*I(x), %rax movq %rax, 8*I(t) .set I, (I+1) .endr movl $0, %ebx sbbq %rax, %rax // Maintain CF, set ZF for cmovs .set I, 0 .rep K movq 8*I(t), %rdx movq %rdx, %rax notq %rdx adcxq %rbx, %rdx cmovzq %rax, %rdx movq %rdx, 8*I(t) .set I, (I+1) .endr // Compose the middle parts [2,1] + [1,0] + [3,2] // Put the low half of this at t[K] and the top half in place at z[2*K]; a // fully in-place version is awkward with the otherwise beneficial double // carry chain. Stash the carry suspended from the 3k position at the end of // the temp buffer t[4*K]. xorl %edx, %edx .set I, 0 .rep K movq -16*K+8*K+8*I(z), %rax adcxq -16*K+8*I(z), %rax adoxq -16*K+16*K+8*I(z), %rax movq %rax, 8*K+8*I(t) .set I, (I+1) .endr .rep K movq -16*K+8*K+8*I(z), %rax adcxq -16*K+8*I(z), %rax adoxq -16*K+16*K+8*I(z), %rax movq %rax, -16*K+8*K+8*I(z) .set I, (I+1) .endr adoxq %rdx, %rdx adcq $0, %rdx movq %rdx, 32*K(t) // Square the absolute difference, putting the result M at t[2*K]. // This involves another shuffle so now t' = z_orig and x' = t_orig // while z' points within the temp buffer to the product M itself movq t, x leaq -16*K(z), t leaq 16*K(x), z CFI_CALL(Lbignum_ksqr_32_64_local_bignum_sqr_16_32) // Subtract M, pausing at the 3k position to bump down accumulated carry. // The carry cannot go negative since it's the top word of a value // of the form ... 
+ h^2 + l^2 - (h - l)^2 >= 0 movq 8*K(x), %rax subq (z), %rax movq %rax, 8*K(t) .set I, 1 .rep (K-1) movq 8*K+8*I(x), %rax sbbq 8*I(z), %rax movq %rax, 8*K+8*I(t) .set I, (I+1) .endr .rep K movq 8*K+8*I(t), %rax sbbq 8*I(z), %rax movq %rax, 8*K+8*I(t) .set I, (I+1) .endr movq 32*K(x), %rdx sbbq $0, %rdx // Finally propagate the carry to the top quarter xorl %eax, %eax addq %rdx, 24*K(t) .set I, 1 .rep K-1 adcq %rax, 24*K+8*I(t) .set I, (I+1) .endr // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbx) CFI_POP(%rbp) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET // Local copy of the half-length subroutine S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lbignum_ksqr_32_64_local_bignum_sqr_16_32) Lbignum_ksqr_32_64_local_bignum_sqr_16_32: CFI_START xorl %ebp, %ebp movq (x), %rdx mulxq 0x8(x), %r9, %rax movq %r9, 0x8(z) mulxq 0x10(x), %r10, %rbx adcxq %rax, %r10 movq %r10, 0x10(z) mulxq 0x18(x), %r11, %rax adcxq %rbx, %r11 mulxq 0x20(x), %r12, %rbx adcxq %rax, %r12 mulxq 0x28(x), %r13, %rax adcxq %rbx, %r13 mulxq 0x30(x), %r14, %rbx adcxq %rax, %r14 mulxq 0x38(x), %r15, %r8 adcxq %rbx, %r15 adcxq %rbp, %r8 xorl %ebp, %ebp movq 0x8(x), %rdx mulxq 0x10(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq %r11, 0x18(z) mulxq 0x18(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq %r12, 0x20(z) mulxq 0x20(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x38(x), %rax, %r9 adcxq %rax, %r8 adoxq %rbp, %r9 movq 0x20(x), %rdx mulxq 0x28(x), %rax, %r10 adcxq %rax, %r9 adoxq %rbp, %r10 adcxq %rbp, %r10 xorl %ebp, %ebp movq 0x10(x), %rdx mulxq 0x18(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq %r13, 0x28(z) mulxq 0x20(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 movq %r14, 0x30(z) mulxq 0x28(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x38(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq 0x30(x), %rdx mulxq 0x20(x), %rax, %r11 adcxq %rax, %r10 adoxq %rbp, %r11 mulxq 0x28(x), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcxq %rbp, %r12 xorl %ebp, %ebp movq 0x18(x), %rdx mulxq 0x20(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 movq %r15, 0x38(z) mulxq 0x28(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x38(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq 0x38(x), %rdx mulxq 0x20(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x28(x), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 mulxq 0x30(x), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcxq %rbp, %r14 movq 0x40(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 movq %r8, 0x40(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x20(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x30(x), %rax, %r15 adcxq %rax, %r14 adoxq %rbp, %r15 mulxq 0x38(x), %rax, %r8 adcxq %rax, %r15 adoxq %rbp, %r8 adcxq %rbp, %r8 movq 0x48(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq %r9, 0x48(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 
0x20(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x38(x), %rax, %r9 adcxq %rax, %r8 adoxq %rbp, %r9 adcxq %rbp, %r9 movq 0x50(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq %r10, 0x50(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x20(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x38(x), %rax, %r10 adcxq %rax, %r9 adoxq %rbp, %r10 adcxq %rbp, %r10 movq 0x58(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq %r11, 0x58(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x20(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x38(x), %rax, %r11 adcxq %rax, %r10 adoxq %rbp, %r11 adcxq %rbp, %r11 movq 0x60(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq %r12, 0x60(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x20(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x38(x), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcxq %rbp, %r12 movq 0x68(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq %r13, 0x68(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x20(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x38(x), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 adcxq %rbp, %r13 movq 0x70(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 movq %r14, 0x70(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x20(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x38(x), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcxq %rbp, %r14 movq 0x78(x), %rdx xorl %ebp, %ebp mulxq (x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 movq %r15, 0x78(z) mulxq 0x8(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x10(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x18(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x20(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x28(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x30(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x38(x), %rax, %r15 adcxq %rax, %r14 adoxq %rbp, %r15 adcxq %rbp, %r15 movq %r8, 0x80(z) movq %r9, 0x88(z) movq %r10, 0x90(z) 
movq %r11, 0x98(z) movq %r12, 0xa0(z) movq %r13, 0xa8(z) movq %r14, 0xb0(z) movq %r15, 0xb8(z) xorl %ebp, %ebp movq 0x88(z), %r9 movq 0x90(z), %r10 movq 0x98(z), %r11 movq 0xa0(z), %r12 movq 0xa8(z), %r13 movq 0xb0(z), %r14 movq 0xb8(z), %r15 movq 0x40(x), %rdx mulxq 0x48(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq %r9, 0x88(z) mulxq 0x50(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq %r10, 0x90(z) mulxq 0x58(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x60(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x68(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x70(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x78(x), %rax, %r8 adcxq %rax, %r15 adoxq %rbp, %r8 adcxq %rbp, %r8 xorl %ebp, %ebp movq 0x48(x), %rdx mulxq 0x50(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq %r11, 0x98(z) mulxq 0x58(x), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq %r12, 0xa0(z) mulxq 0x60(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x68(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 mulxq 0x70(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x78(x), %rax, %r9 adcxq %rax, %r8 adoxq %rbp, %r9 movq 0x60(x), %rdx mulxq 0x68(x), %rax, %r10 adcxq %rax, %r9 adoxq %rbp, %r10 adcxq %rbp, %r10 xorl %ebp, %ebp movq 0x50(x), %rdx mulxq 0x58(x), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movq %r13, 0xa8(z) mulxq 0x60(x), %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 movq %r14, 0xb0(z) mulxq 0x68(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 mulxq 0x70(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x78(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 movq 0x70(x), %rdx mulxq 0x60(x), %rax, %r11 adcxq %rax, %r10 adoxq %rbp, %r11 mulxq 0x68(x), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcxq %rbp, %r12 xorl %ebp, %ebp movq 0x58(x), %rdx mulxq 0x60(x), %rax, %rbx adcxq %rax, %r15 adoxq %rbx, %r8 movq %r15, 0xb8(z) mulxq 0x68(x), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x70(x), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x78(x), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq 0x78(x), %rdx mulxq 0x60(x), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x68(x), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 mulxq 0x70(x), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcxq %rbp, %r14 movq %r8, 0xc0(z) movq %r9, 0xc8(z) movq %r10, 0xd0(z) movq %r11, 0xd8(z) movq %r12, 0xe0(z) movq %r13, 0xe8(z) movq %r14, 0xf0(z) xorl %ebp, %ebp movq (x), %rdx mulxq %rdx, %rax, %rbx movq %rax, (z) movq 0x8(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x8(z) movq 0x8(x), %rdx mulxq %rdx, %rax, %rbx movq 0x10(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x10(z) movq 0x18(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x18(z) movq 0x10(x), %rdx mulxq %rdx, %rax, %rbx movq 0x20(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x20(z) movq 0x28(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x28(z) movq 0x18(x), %rdx mulxq %rdx, %rax, %rbx movq 0x30(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x30(z) movq 0x38(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x38(z) movq 0x20(x), %rdx mulxq %rdx, %rax, %rbx movq 0x40(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x40(z) movq 0x48(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x48(z) movq 0x28(x), %rdx mulxq %rdx, %rax, %rbx movq 0x50(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x50(z) movq 0x58(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x58(z) movq 0x30(x), %rdx mulxq %rdx, %rax, %rbx movq 0x60(z), %rdx adcxq 
%rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x60(z) movq 0x68(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x68(z) movq 0x38(x), %rdx mulxq %rdx, %rax, %rbx movq 0x70(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x70(z) movq 0x78(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x78(z) movq 0x40(x), %rdx mulxq %rdx, %rax, %rbx movq 0x80(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x80(z) movq 0x88(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x88(z) movq 0x48(x), %rdx mulxq %rdx, %rax, %rbx movq 0x90(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0x90(z) movq 0x98(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0x98(z) movq 0x50(x), %rdx mulxq %rdx, %rax, %rbx movq 0xa0(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0xa0(z) movq 0xa8(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0xa8(z) movq 0x58(x), %rdx mulxq %rdx, %rax, %rbx movq 0xb0(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0xb0(z) movq 0xb8(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0xb8(z) movq 0x60(x), %rdx mulxq %rdx, %rax, %rbx movq 0xc0(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0xc0(z) movq 0xc8(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0xc8(z) movq 0x68(x), %rdx mulxq %rdx, %rax, %rbx movq 0xd0(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0xd0(z) movq 0xd8(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0xd8(z) movq 0x70(x), %rdx mulxq %rdx, %rax, %rbx movq 0xe0(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0xe0(z) movq 0xe8(z), %rdx adcxq %rdx, %rdx adoxq %rbx, %rdx movq %rdx, 0xe8(z) movq 0x78(x), %rdx mulxq %rdx, %rax, %rbx movq 0xf0(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 0xf0(z) adcxq %rbp, %rbx adoxq %rbp, %rbx movq %rbx, 0xf8(z) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lbignum_ksqr_32_64_local_bignum_sqr_16_32) S2N_BN_SIZE_DIRECTIVE(bignum_ksqr_32_64) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
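The inequality fragment carried into the subtraction above, h^2 + l^2 - (h - l)^2 >= 0, is the cross-term identity behind Karatsuba-style squaring: for a split x = B*h + l, the middle term 2*h*l can be recovered as h^2 + l^2 - (h - l)^2, which is never negative. A minimal C check of the identity (illustrative only; the variable names are ours, and the identity also holds word-by-word modulo 2^64):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    // For x = B*h + l we have x^2 = B^2*h^2 + B*(2*h*l) + l^2, and the
    // cross term can be formed as 2*h*l = h^2 + l^2 - (h - l)^2 >= 0.
    uint64_t h = 0x12345678u, l = 0x9abcdef0u;
    uint64_t cross = h * h + l * l - (h - l) * (h - l);
    printf("2*h*l                 = 0x%016llx\n", (unsigned long long)(2 * h * l));
    printf("h^2 + l^2 - (h - l)^2 = 0x%016llx\n", (unsigned long long)cross);
    return 0;
}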
wlsfx/bnbb
6,031
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_mul_8_16_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[8], y[8]; output z[16] // // extern void bignum_mul_8_16_alt(uint64_t z[static 16], // const uint64_t x[static 8], // const uint64_t y[static 8]); // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_8_16_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_8_16_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_8_16_alt) .text // These are actually right #define z %rdi #define x %rsi // This is moved from %rdx to free it for muls #define y %rcx // Other variables used as a rotating 3-word window to add terms to #define t0 %r8 #define t1 %r9 #define t2 %r10 // Macro for the key "multiply and add to (c,h,l)" step #define combadd(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq $0, c // A minutely shorter form for when c = 0 initially #define combadz(c,h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h ; \ adcq c, c // A short form where we don't expect a top carry #define combads(h,l,numa,numb) \ movq numa, %rax ; \ mulq numb; \ addq %rax, l ; \ adcq %rdx, h S2N_BN_SYMBOL(bignum_mul_8_16_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Copy y into a safe register to start with movq %rdx, y // Result term 0 movq (x), %rax mulq (y) movq %rax, (z) movq %rdx, t0 xorq t1, t1 // Result term 1 xorq t2, t2 combads(t1,t0,(x),8(y)) combadz(t2,t1,t0,8(x),(y)) movq t0, 8(z) // Result term 2 xorq t0, t0 combadz(t0,t2,t1,(x),16(y)) combadd(t0,t2,t1,8(x),8(y)) combadd(t0,t2,t1,16(x),(y)) movq t1, 16(z) // Result term 3 xorq t1, t1 combadz(t1,t0,t2,(x),24(y)) combadd(t1,t0,t2,8(x),16(y)) combadd(t1,t0,t2,16(x),8(y)) combadd(t1,t0,t2,24(x),(y)) movq t2, 24(z) // Result term 4 xorq t2, t2 combadz(t2,t1,t0,(x),32(y)) combadd(t2,t1,t0,8(x),24(y)) combadd(t2,t1,t0,16(x),16(y)) combadd(t2,t1,t0,24(x),8(y)) combadd(t2,t1,t0,32(x),(y)) movq t0, 32(z) // Result term 5 xorq t0, t0 combadz(t0,t2,t1,(x),40(y)) combadd(t0,t2,t1,8(x),32(y)) combadd(t0,t2,t1,16(x),24(y)) combadd(t0,t2,t1,24(x),16(y)) combadd(t0,t2,t1,32(x),8(y)) combadd(t0,t2,t1,40(x),(y)) movq t1, 40(z) // Result term 6 xorq t1, t1 combadz(t1,t0,t2,(x),48(y)) combadd(t1,t0,t2,8(x),40(y)) combadd(t1,t0,t2,16(x),32(y)) combadd(t1,t0,t2,24(x),24(y)) combadd(t1,t0,t2,32(x),16(y)) combadd(t1,t0,t2,40(x),8(y)) combadd(t1,t0,t2,48(x),(y)) movq t2, 48(z) // Result term 7 xorq t2, t2 combadz(t2,t1,t0,(x),56(y)) combadd(t2,t1,t0,8(x),48(y)) combadd(t2,t1,t0,16(x),40(y)) combadd(t2,t1,t0,24(x),32(y)) combadd(t2,t1,t0,32(x),24(y)) combadd(t2,t1,t0,40(x),16(y)) combadd(t2,t1,t0,48(x),8(y)) combadd(t2,t1,t0,56(x),(y)) movq t0, 56(z) // Result term 8 xorq t0, t0 combadz(t0,t2,t1,8(x),56(y)) combadd(t0,t2,t1,16(x),48(y)) combadd(t0,t2,t1,24(x),40(y)) combadd(t0,t2,t1,32(x),32(y)) combadd(t0,t2,t1,40(x),24(y)) combadd(t0,t2,t1,48(x),16(y)) combadd(t0,t2,t1,56(x),8(y)) movq t1, 64(z) // Result term 9 xorq t1, t1 combadz(t1,t0,t2,16(x),56(y)) combadd(t1,t0,t2,24(x),48(y)) combadd(t1,t0,t2,32(x),40(y)) combadd(t1,t0,t2,40(x),32(y)) combadd(t1,t0,t2,48(x),24(y)) combadd(t1,t0,t2,56(x),16(y)) 
movq t2, 72(z) // Result term 10 xorq t2, t2 combadz(t2,t1,t0,24(x),56(y)) combadd(t2,t1,t0,32(x),48(y)) combadd(t2,t1,t0,40(x),40(y)) combadd(t2,t1,t0,48(x),32(y)) combadd(t2,t1,t0,56(x),24(y)) movq t0, 80(z) // Result term 11 xorq t0, t0 combadz(t0,t2,t1,32(x),56(y)) combadd(t0,t2,t1,40(x),48(y)) combadd(t0,t2,t1,48(x),40(y)) combadd(t0,t2,t1,56(x),32(y)) movq t1, 88(z) // Result term 12 xorq t1, t1 combadz(t1,t0,t2,40(x),56(y)) combadd(t1,t0,t2,48(x),48(y)) combadd(t1,t0,t2,56(x),40(y)) movq t2, 96(z) // Result term 13 xorq t2, t2 combadz(t2,t1,t0,48(x),56(y)) combadd(t2,t1,t0,56(x),48(y)) movq t0, 104(z) // Result term 14 combads(t2,t1,56(x),56(y)) movq t1, 112(z) // Result term 15 movq t2, 120(z) // Return #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_8_16_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
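For reference, the rotating three-word accumulator that combadd/combadz/combads maintain can be sketched in portable C (a sketch only, assuming a compiler with unsigned __int128; the function name is ours):

#include <stdint.h>

// Sketch of combadd(c,h,l,a,b): add the 128-bit product a*b into the
// rotating three-word window [c;h;l], propagating both carries upward,
// mirroring the addq/adcq/adcq $0 sequence in the macro.
static inline void combadd_sketch(uint64_t *c, uint64_t *h, uint64_t *l,
                                  uint64_t a, uint64_t b) {
    unsigned __int128 p = (unsigned __int128)a * b;
    unsigned __int128 lo = (unsigned __int128)*l + (uint64_t)p;
    unsigned __int128 hi = (unsigned __int128)*h + (uint64_t)(p >> 64)
                         + (uint64_t)(lo >> 64);
    *l = (uint64_t)lo;
    *h = (uint64_t)hi;
    *c += (uint64_t)(hi >> 64);
}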
wlsfx/bnbb
15,332
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_ksqr_16_32.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square, z := x^2 // Input x[16]; output z[32]; temporary buffer t[>=24] // // extern void bignum_ksqr_16_32(uint64_t z[static 32], // const uint64_t x[static 16], // uint64_t t[static 24]); // // In this x86 code the final temporary space argument t is unused, but // it is retained in the prototype above for API consistency with ARM. // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = t // Microsoft x64 ABI: RCX = z, RDX = x, R8 = t // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_ksqr_16_32) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_ksqr_16_32) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_ksqr_16_32) .text #define z %rdi #define x %rsi // A zero register #define zero %rbp #define zeroe %ebp // ------------------------------------------------------------------------ // mulpadd i, j adds rdx * x[i] into the window at the i+j point // ------------------------------------------------------------------------ .macro mulpadd arg1,arg2 mulxq 8*\arg1(x), %rax, %rcx .if ((\arg1 + \arg2) % 8 == 0) adcxq %rax, %r8 adoxq %rcx, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) adcxq %rax, %r9 adoxq %rcx, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) adcxq %rax, %r10 adoxq %rcx, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) adcxq %rax, %r11 adoxq %rcx, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) adcxq %rax, %r12 adoxq %rcx, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) adcxq %rax, %r13 adoxq %rcx, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) adcxq %rax, %r14 adoxq %rcx, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) adcxq %rax, %r15 adoxq %rcx, %r8 .endif .endm // ------------------------------------------------------------------------ // mulpade i, j adds rdx * x[i] into the window at i+j // but re-creates the top word assuming nothing to add there // ------------------------------------------------------------------------ .macro mulpade arg1,arg2 .if ((\arg1 + \arg2) % 8 == 0) mulxq 8*\arg1(x), %rax, %r9 adcxq %rax, %r8 adoxq zero, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) mulxq 8*\arg1(x), %rax, %r10 adcxq %rax, %r9 adoxq zero, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) mulxq 8*\arg1(x), %rax, %r11 adcxq %rax, %r10 adoxq zero, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) mulxq 8*\arg1(x), %rax, %r12 adcxq %rax, %r11 adoxq zero, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) mulxq 8*\arg1(x), %rax, %r13 adcxq %rax, %r12 adoxq zero, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) mulxq 8*\arg1(x), %rax, %r14 adcxq %rax, %r13 adoxq zero, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) mulxq 8*\arg1(x), %rax, %r15 adcxq %rax, %r14 adoxq zero, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) mulxq 8*\arg1(x), %rax, %r8 adcxq %rax, %r15 adoxq zero, %r8 .endif .endm // ------------------------------------------------------------------------ // addrow i,j adds z[i+j] + x[i..i+7] * x[j] into the window // ------------------------------------------------------------------------ .macro addrow arg1,arg2 movq 8*\arg2(x), %rdx xorl zeroe, zeroe // Get a known flag state and give a zero reg .if ((\arg1 + \arg2) % 8 == 0) adoxq 8*(\arg1+\arg2)(z), %r8 .elseif ((\arg1 + \arg2) % 8 == 1) adoxq 8*(\arg1+\arg2)(z), %r9 .elseif ((\arg1 + \arg2) % 8 == 2) adoxq 8*(\arg1+\arg2)(z), %r10 .elseif ((\arg1 + \arg2) % 8 == 3) adoxq 8*(\arg1+\arg2)(z), %r11 .elseif ((\arg1 + \arg2) % 8 == 4) adoxq 
8*(\arg1+\arg2)(z), %r12 .elseif ((\arg1 + \arg2) % 8 == 5) adoxq 8*(\arg1+\arg2)(z), %r13 .elseif ((\arg1 + \arg2) % 8 == 6) adoxq 8*(\arg1+\arg2)(z), %r14 .elseif ((\arg1 + \arg2) % 8 == 7) adoxq 8*(\arg1+\arg2)(z), %r15 .endif mulpadd \arg1, \arg2 .if ((\arg1 + \arg2) % 8 == 0) movq %r8, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 1) movq %r9, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 2) movq %r10, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 3) movq %r11, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 4) movq %r12, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 5) movq %r13, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 6) movq %r14, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 7) movq %r15, 8*(\arg1+\arg2)(z) .endif mulpadd (\arg1+1), \arg2 mulpadd (\arg1+2), \arg2 mulpadd (\arg1+3), \arg2 mulpadd (\arg1+4), \arg2 mulpadd (\arg1+5), \arg2 mulpade (\arg1+6), \arg2 mulpade (\arg1+7), \arg2 .if ((\arg1 + \arg2) % 8 == 0) adcxq zero, %r8 .elseif ((\arg1 + \arg2) % 8 == 1) adcxq zero, %r9 .elseif ((\arg1 + \arg2) % 8 == 2) adcxq zero, %r10 .elseif ((\arg1 + \arg2) % 8 == 3) adcxq zero, %r11 .elseif ((\arg1 + \arg2) % 8 == 4) adcxq zero, %r12 .elseif ((\arg1 + \arg2) % 8 == 5) adcxq zero, %r13 .elseif ((\arg1 + \arg2) % 8 == 6) adcxq zero, %r14 .elseif ((\arg1 + \arg2) % 8 == 7) adcxq zero, %r15 .endif .endm // ------------------------------------------------------------------------ // Adds off-diagonal part of x[i..i+7]^2 into the window, writes 0..7 back // ------------------------------------------------------------------------ .macro sqr arg1 xorl zeroe, zeroe // Set up the initial window movq 16*\arg1+8(z), %r9 movq 16*\arg1+16(z), %r10 movq 16*\arg1+24(z), %r11 movq 16*\arg1+32(z), %r12 movq 16*\arg1+40(z), %r13 movq 16*\arg1+48(z), %r14 movq 16*\arg1+56(z), %r15 // Add in the first diagonal [%r8..%r10] + 2 wb = 10 + 20 + 30 + 40 + 50 + 60 + 70 movq 8*\arg1(x), %rdx mulpadd (\arg1+1), (\arg1+0) movq %r9, 16*\arg1+8(z) mulpadd (\arg1+2), (\arg1+0) movq %r10, 16*\arg1+16(z) mulpadd (\arg1+3), (\arg1+0) mulpadd (\arg1+4), (\arg1+0) mulpadd (\arg1+5), (\arg1+0) mulpadd (\arg1+6), (\arg1+0) mulpade (\arg1+7), (\arg1+0) adcxq zero, %r8 // Add in the next diagonal = 21 + 31 + 41 + 51 + 61 + 71 + 54 xorl zeroe, zeroe movq 8*\arg1+8(x), %rdx mulpadd (\arg1+2), (\arg1+1) movq %r11, 16*\arg1+24(z) mulpadd (\arg1+3), (\arg1+1) movq %r12, 16*\arg1+32(z) mulpadd (\arg1+4), (\arg1+1) mulpadd (\arg1+5), (\arg1+1) mulpadd (\arg1+6), (\arg1+1) mulpade (\arg1+7), (\arg1+1) movq 8*\arg1+32(x), %rdx mulpade (\arg1+5), (\arg1+4) adcxq zero, %r10 // And the next one = 32 + 42 + 52 + 62 + 72 + 64 + 65 xorl zeroe, zeroe movq 8*\arg1+16(x), %rdx mulpadd (\arg1+3), (\arg1+2) movq %r13, 16*\arg1+40(z) mulpadd (\arg1+4), (\arg1+2) movq %r14, 16*\arg1+48(z) mulpadd (\arg1+5), (\arg1+2) mulpadd (\arg1+6), (\arg1+2) mulpadd (\arg1+7), (\arg1+2) movq 8*\arg1+48(x), %rdx mulpade (\arg1+4), (\arg1+6) mulpade (\arg1+5), (\arg1+6) adcxq zero, %r12 // And the final one = 43 + 53 + 63 + 73 + 74 + 75 + 76 xorl zeroe, zeroe movq 8*\arg1+24(x), %rdx mulpadd (\arg1+4), (\arg1+3) movq %r15, 16*\arg1+56(z) mulpadd (\arg1+5), (\arg1+3) mulpadd (\arg1+6), (\arg1+3) mulpadd (\arg1+7), (\arg1+3) movq 8*\arg1+56(x), %rdx mulpadd (\arg1+4), (\arg1+7) mulpade (\arg1+5), (\arg1+7) mulpade (\arg1+6), (\arg1+7) adcxq zero, %r14 .endm // ------------------------------------------------------------------------ // Multiply-add: z := z + x[i...i+7] * x // 
------------------------------------------------------------------------ .macro addrows arg1 sqr \arg1 .set I, (\arg1+8) .rep (8-\arg1) addrow \arg1, I .set I, (I+1) .endr movq %r8, 8*(16+\arg1)(z) movq %r9, 8*(17+\arg1)(z) movq %r10, 8*(18+\arg1)(z) movq %r11, 8*(19+\arg1)(z) movq %r12, 8*(20+\arg1)(z) movq %r13, 8*(21+\arg1)(z) movq %r14, 8*(22+\arg1)(z) .endm // ------------------------------------------------------------------------ // mulrow i,j adds x[i..i+7] * x[j] into the window // just like addrow but no addition of z[i+j] // ------------------------------------------------------------------------ .macro mulrow arg1,arg2 movq 8*\arg2(x), %rdx xorl zeroe, zeroe // Get a known flag state and give a zero reg mulpadd \arg1, \arg2 .if ((\arg1 + \arg2) % 8 == 0) movq %r8, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 1) movq %r9, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 2) movq %r10, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 3) movq %r11, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 4) movq %r12, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 5) movq %r13, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 6) movq %r14, 8*(\arg1+\arg2)(z) .elseif ((\arg1 + \arg2) % 8 == 7) movq %r15, 8*(\arg1+\arg2)(z) .endif mulpadd (\arg1+1), \arg2 mulpadd (\arg1+2), \arg2 mulpadd (\arg1+3), \arg2 mulpadd (\arg1+4), \arg2 mulpadd (\arg1+5), \arg2 .if ((\arg1 + \arg2) % 8 == 0) mulpade (\arg1+6), \arg2 .else mulpadd (\arg1+6), \arg2 .endif mulpade (\arg1+7), \arg2 .if ((\arg1 + \arg2) % 8 == 0) adcxq zero, %r8 .elseif ((\arg1 + \arg2) % 8 == 1) adcxq zero, %r9 .elseif ((\arg1 + \arg2) % 8 == 2) adcxq zero, %r10 .elseif ((\arg1 + \arg2) % 8 == 3) adcxq zero, %r11 .elseif ((\arg1 + \arg2) % 8 == 4) adcxq zero, %r12 .elseif ((\arg1 + \arg2) % 8 == 5) adcxq zero, %r13 .elseif ((\arg1 + \arg2) % 8 == 6) adcxq zero, %r14 .elseif ((\arg1 + \arg2) % 8 == 7) adcxq zero, %r15 .endif .endm // ------------------------------------------------------------------------ // Compute off-diagonal part of x[0..7]^2, write back 1..7 elements and // set up the high part in the standard register window. DOES NOT WRITE z[0]! 
// ------------------------------------------------------------------------ .macro sqrz xorl zeroe, zeroe // Set initial window [%r8..%r10] + 2 wb = 10 + 20 + 30 + 40 + 50 + 60 + 70 movq (x), %rdx mulxq 8(x), %r9, %rax movq %r9, 8(z) mulxq 16(x), %r10, %rcx adcxq %rax, %r10 movq %r10, 16(z) mulxq 24(x), %r11, %rax adcxq %rcx, %r11 mulxq 32(x), %r12, %rcx adcxq %rax, %r12 mulxq 40(x), %r13, %rax adcxq %rcx, %r13 mulxq 48(x), %r14, %rcx adcxq %rax, %r14 mulxq 56(x), %r15, %r8 adcxq %rcx, %r15 adcxq zero, %r8 // Add in the next diagonal = 21 + 31 + 41 + 51 + 61 + 71 + 54 xorl zeroe, zeroe movq 8(x), %rdx mulpadd 2, 1 movq %r11, 24(z) mulpadd 3, 1 movq %r12, 32(z) mulpadd 4, 1 mulpadd 5, 1 mulpadd 6, 1 mulpade 7, 1 movq 32(x), %rdx mulpade 5, 4 adcxq zero, %r10 // And the next one = 32 + 42 + 52 + 62 + 72 + 64 + 65 xorl zeroe, zeroe movq 16(x), %rdx mulpadd 3, 2 movq %r13, 40(z) mulpadd 4, 2 movq %r14, 48(z) mulpadd 5, 2 mulpadd 6, 2 mulpadd 7, 2 movq 48(x), %rdx mulpade 4, 6 mulpade 5, 6 adcxq zero, %r12 // And the final one = 43 + 53 + 63 + 73 + 74 + 75 + 76 xorl zeroe, zeroe movq 24(x), %rdx mulpadd 4, 3 movq %r15, 56(z) mulpadd 5, 3 mulpadd 6, 3 mulpadd 7, 3 movq 56(x), %rdx mulpadd 4, 7 mulpade 5, 7 mulpade 6, 7 adcxq zero, %r14 .endm // ------------------------------------------------------------------------ // Multiply-add: z := x[0...7] * x off-diagonal elements // ------------------------------------------------------------------------ .macro mulrows sqrz .set I, 8 .rep 8 mulrow 0, I .set I, (I+1) .endr movq %r8, 128(z) movq %r9, 136(z) movq %r10, 144(z) movq %r11, 152(z) movq %r12, 160(z) movq %r13, 168(z) movq %r14, 176(z) movq %r15, 184(z) .endm // ------------------------------------------------------------------------ // The actual code // ------------------------------------------------------------------------ S2N_BN_SYMBOL(bignum_ksqr_16_32): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Now just systematically add in the rows to get all off-diagonal elements mulrows addrows 8 // Double and add the diagonal elements. Note that z[0] was never written above xorl zeroe, zeroe movq (x), %rdx mulxq %rdx, %rax, %rcx movq %rax, (z) movq 8(z), %rdx adcxq %rdx, %rdx adoxq %rcx, %rdx movq %rdx, 8(z) .set I, 1 .rep 14 movq 8*I(x), %rdx mulxq %rdx, %rax, %rcx movq 8*(2*I)(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 8*(2*I)(z) movq 8*(2*I+1)(z), %rdx adcxq %rdx, %rdx adoxq %rcx, %rdx movq %rdx, 8*(2*I+1)(z) .set I, (I+1) .endr movq 8*I(x), %rdx mulxq %rdx, %rax, %rcx movq 8*(2*I)(z), %rdx adcxq %rdx, %rdx adoxq %rax, %rdx movq %rdx, 8*(2*I)(z) adcxq zero, %rcx adoxq zero, %rcx movq %rcx, 8*(2*I+1)(z) .set I, (I+1) // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_ksqr_16_32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
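The overall shape of the squaring above, which accumulates only the products x[i]*x[j] with i < j and then doubles everything before folding in the diagonal squares x[i]^2, can be sketched generically in C (illustrative; N and the helper name are ours, and this scalar version ignores the ADCX/ADOX flag-chain scheduling that the assembly exists to exploit):

#include <stdint.h>

#define N 4   // stand-in digit count; the file above uses 16

void sqr_sketch(uint64_t z[2 * N], const uint64_t x[N]) {
    for (int i = 0; i < 2 * N; i++) z[i] = 0;
    // Off-diagonal products only: sum of x[i]*x[j]*B^(i+j) for i < j
    for (int i = 0; i < N; i++)
        for (int j = i + 1; j < N; j++) {
            unsigned __int128 p = (unsigned __int128)x[i] * x[j];
            uint64_t carry = (uint64_t)(p >> 64);
            unsigned __int128 s = (unsigned __int128)z[i + j] + (uint64_t)p;
            z[i + j] = (uint64_t)s;
            carry += (uint64_t)(s >> 64);
            for (int t = i + j + 1; carry != 0; t++) {  // ripple the carry
                s = (unsigned __int128)z[t] + carry;
                z[t] = (uint64_t)s;
                carry = (uint64_t)(s >> 64);
            }
        }
    // Double the off-diagonal sum: z := 2*z (cannot overflow 2*N words)
    uint64_t topbit = 0;
    for (int i = 0; i < 2 * N; i++) {
        uint64_t w = z[i];
        z[i] = (w << 1) | topbit;
        topbit = w >> 63;
    }
    // Fold in the diagonal squares x[i]^2 at position 2*i
    uint64_t c = 0;
    for (int i = 0; i < N; i++) {
        unsigned __int128 p = (unsigned __int128)x[i] * x[i];
        unsigned __int128 s = (unsigned __int128)z[2 * i] + (uint64_t)p + c;
        z[2 * i] = (uint64_t)s;
        s = (unsigned __int128)z[2 * i + 1] + (uint64_t)(p >> 64)
          + (uint64_t)(s >> 64);
        z[2 * i + 1] = (uint64_t)s;
        c = (uint64_t)(s >> 64);
    }
}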
wlsfx/bnbb
5,569
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_sqr_6_12.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square, z := x^2 // Input x[6]; output z[12] // // extern void bignum_sqr_6_12(uint64_t z[static 12], const uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_6_12) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_6_12) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_6_12) .text // These are actually right #define z %rdi #define x %rsi // A zero register #define zero %rbp #define zeroe %ebp // Other registers #define d1 %r8 #define d2 %r9 #define d3 %r10 #define d4 %r11 #define d5 %r12 #define d6 %r13 #define d7 %r14 #define d8 %r15 #define d9 %rbx // Care is needed: re-using the zero register #define d10 %rbp S2N_BN_SYMBOL(bignum_sqr_6_12): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with CFI_PUSH(%rbp) CFI_PUSH(%rbx) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Set up an initial window [d8;...d1] = [34;05;03;01] movq (x), %rdx mulxq 8(x), d1, d2 mulxq 24(x), d3, d4 mulxq 40(x), d5, d6 movq 24(x), %rdx mulxq 32(x), d7, d8 // Clear our zero register, and also initialize the flags for the carry chain xorl zeroe, zeroe // Chain in the addition of 02 + 12 + 13 + 14 + 15 to that window // (no carry-out possible since we add it to the top of a product) movq 16(x), %rdx mulxq (x), %rax, %rcx adcxq %rax, d2 adoxq %rcx, d3 mulxq 8(x), %rax, %rcx adcxq %rax, d3 adoxq %rcx, d4 movq 8(x), %rdx mulxq 24(x), %rax, %rcx adcxq %rax, d4 adoxq %rcx, d5 mulxq 32(x), %rax, %rcx adcxq %rax, d5 adoxq %rcx, d6 mulxq 40(x), %rax, %rcx adcxq %rax, d6 adoxq %rcx, d7 adcxq zero, d7 adoxq zero, d8 adcxq zero, d8 // Again zero out the flags. Actually they are already cleared but it may // help decouple these in the OOO engine not to wait for the chain above xorl zeroe, zeroe // Now chain in the 04 + 23 + 24 + 25 + 35 + 45 terms // We are running out of registers and here our zero register is not zero! movq 32(x), %rdx mulxq (x), %rax, %rcx adcxq %rax, d4 adoxq %rcx, d5 movq 16(x), %rdx mulxq 24(x), %rax, %rcx adcxq %rax, d5 adoxq %rcx, d6 mulxq 32(x), %rax, %rcx adcxq %rax, d6 adoxq %rcx, d7 mulxq 40(x), %rax, %rcx adcxq %rax, d7 adoxq %rcx, d8 movq 24(x), %rdx mulxq 40(x), %rax, d9 adcxq %rax, d8 adoxq zero, d9 movq 32(x), %rdx mulxq 40(x), %rax, d10 adcxq %rax, d9 movl $0, %eax adoxq %rax, d10 adcxq %rax, d10 // Again, just for a clear fresh start for the flags xorl %eax, %eax // Double and add to the 00 + 11 + 22 + 33 + 44 + 55 terms // // We could use shift-double but this seems tidier and in larger squarings // it was actually more efficient. I haven't experimented with this small // case to see how much that matters. Note: the writeback here is sprinkled // into the sequence in such a way that things still work if z = x, i.e. if // the output overwrites the input buffer and beyond. 
movq (x), %rdx mulxq %rdx, %rax, %rdx movq %rax, (z) adcxq d1, d1 adoxq %rdx, d1 movq 8(x), %rdx movq d1, 8(z) mulxq %rdx, %rax, %rdx adcxq d2, d2 adoxq %rax, d2 adcxq d3, d3 adoxq %rdx, d3 movq 16(x), %rdx movq d2, 16(z) mulxq %rdx, %rax, %rdx adcxq d4, d4 adoxq %rax, d4 adcxq d5, d5 adoxq %rdx, d5 movq 24(x), %rdx movq d3, 24(z) mulxq %rdx, %rax, %rdx adcxq d6, d6 adoxq %rax, d6 adcxq d7, d7 adoxq %rdx, d7 movq 32(x), %rdx movq d4, 32(z) mulxq %rdx, %rax, %rdx adcxq d8, d8 adoxq %rax, d8 adcxq d9, d9 adoxq %rdx, d9 movq 40(x), %rdx movq d5, 40(z) mulxq %rdx, %rax, %rdx movq d6, 48(z) adcxq d10, d10 movq d7, 56(z) adoxq %rax, d10 movq d8, 64(z) movl $0, %eax movq d9, 72(z) adcxq %rax, %rdx movq d10, 80(z) adoxq %rax, %rdx movq %rdx, 88(z) // Restore saved registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbx) CFI_POP(%rbp) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_sqr_6_12) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
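A minimal harness sketch for the function above, assuming the file is assembled and linked into the program; the prototype is the one from the header comment, and the expected low word follows from 2^2 = 4:

#include <stdint.h>
#include <stdio.h>

// Prototype as given in the file header; the assembled object must be
// linked in for this to run.
extern void bignum_sqr_6_12(uint64_t z[static 12], const uint64_t x[static 6]);

int main(void) {
    uint64_t x[6] = {2, 0, 0, 0, 0, 0};  // the 6-digit bignum 2
    uint64_t z[12];
    bignum_sqr_6_12(z, x);
    printf("z[0] = %llu (expect 4, since 2^2 = 4)\n", (unsigned long long)z[0]);
    return 0;
}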
wlsfx/bnbb
11,232
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/fastmul/bignum_emontredc_8n.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Extended Montgomery reduce in 8-digit blocks, results in input-output buffer // Inputs z[2*k], m[k], w; outputs function return (extra result bit) and z[2*k] // // extern uint64_t bignum_emontredc_8n(uint64_t k, uint64_t *z, const uint64_t *m, // uint64_t w); // // Functionally equivalent to bignum_emontredc (see that file for more detail). // But in general assumes that the input k is a multiple of 8. // // Standard x86-64 ABI: RDI = k, RSI = z, RDX = m, RCX = w, returns RAX // Microsoft x64 ABI: RCX = k, RDX = z, R8 = m, R9 = w, returns RAX // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_emontredc_8n) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_emontredc_8n) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_emontredc_8n) .text // Original input parameters are here #define z %rsi #define w %rcx // This is copied in early once we stash away k #define m %rdi // A variable z pointer #define zz %rbp // Stack-based variables #define carry (%rsp) #define innercount 8(%rsp) #define outercount 16(%rsp) #define k8m1 24(%rsp) // ----------------------------------------------------------------------------- // Standard macros as used in pure multiplier arrays // ----------------------------------------------------------------------------- // mulpadd i, j adds z[i] * rdx (now assumed = m[j]) into the window at i+j .macro mulpadd arg1,arg2 mulxq 8*\arg1(z), %rax, %rbx .if ((\arg1 + \arg2) % 8 == 0) adcxq %rax, %r8 adoxq %rbx, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) adcxq %rax, %r9 adoxq %rbx, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) adcxq %rax, %r10 adoxq %rbx, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) adcxq %rax, %r11 adoxq %rbx, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) adcxq %rax, %r12 adoxq %rbx, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) adcxq %rax, %r13 adoxq %rbx, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) adcxq %rax, %r14 adoxq %rbx, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) adcxq %rax, %r15 adoxq %rbx, %r8 .endif .endm // addrow i adds z[i] + zz[0..7] * m[j] into the window .macro addrow arg1 movq 8*\arg1(m), %rdx xorl %eax, %eax // Get a known flag state .if (\arg1 % 8 == 0) adoxq 8*\arg1(zz), %r8 .elseif (\arg1 % 8 == 1) adoxq 8*\arg1(zz), %r9 .elseif (\arg1 % 8 == 2) adoxq 8*\arg1(zz), %r10 .elseif (\arg1 % 8 == 3) adoxq 8*\arg1(zz), %r11 .elseif (\arg1 % 8 == 4) adoxq 8*\arg1(zz), %r12 .elseif (\arg1 % 8 == 5) adoxq 8*\arg1(zz), %r13 .elseif (\arg1 % 8 == 6) adoxq 8*\arg1(zz), %r14 .elseif (\arg1 % 8 == 7) adoxq 8*\arg1(zz), %r15 .endif mulpadd 0, \arg1 .if (\arg1 % 8 == 0) movq %r8, 8*\arg1(zz) movl $0, %r8d .elseif (\arg1 % 8 == 1) movq %r9, 8*\arg1(zz) movl $0, %r9d .elseif (\arg1 % 8 == 2) movq %r10, 8*\arg1(zz) movl $0, %r10d .elseif (\arg1 % 8 == 3) movq %r11, 8*\arg1(zz) movl $0, %r11d .elseif (\arg1 % 8 == 4) movq %r12, 8*\arg1(zz) movl $0, %r12d .elseif (\arg1 % 8 == 5) movq %r13, 8*\arg1(zz) movl $0, %r13d .elseif (\arg1 % 8 == 6) movq %r14, 8*\arg1(zz) movl $0, %r14d .elseif (\arg1 % 8 == 7) movq %r15, 8*\arg1(zz) movl $0, %r15d .endif mulpadd 1, \arg1 mulpadd 2, \arg1 mulpadd 3, \arg1 mulpadd 4, \arg1 mulpadd 5, \arg1 mulpadd 6, \arg1 mulpadd 7, \arg1 .if (\arg1 % 8 == 0) adcq $0, %r8 .elseif (\arg1 % 8 == 1) adcq $0, %r9 .elseif (\arg1 % 8 == 2) adcq $0, %r10 .elseif (\arg1 % 8 == 3) adcq $0, 
%r11 .elseif (\arg1 % 8 == 4) adcq $0, %r12 .elseif (\arg1 % 8 == 5) adcq $0, %r13 .elseif (\arg1 % 8 == 6) adcq $0, %r14 .elseif (\arg1 % 8 == 7) adcq $0, %r15 .endif .endm // ----------------------------------------------------------------------------- // Anti-matter versions with z and m switched, and also not writing back the z // words, but the inverses instead, *and* also adding in the z[0..7] at the // beginning. The aim is to use this in Montgomery where we discover z[j] // entries as we go along. // ----------------------------------------------------------------------------- .macro mulpadda arg1,arg2 mulxq 8*\arg1(m), %rax, %rbx .if ((\arg1 + \arg2) % 8 == 0) adcxq %rax, %r8 adoxq %rbx, %r9 .elseif ((\arg1 + \arg2) % 8 == 1) adcxq %rax, %r9 adoxq %rbx, %r10 .elseif ((\arg1 + \arg2) % 8 == 2) adcxq %rax, %r10 adoxq %rbx, %r11 .elseif ((\arg1 + \arg2) % 8 == 3) adcxq %rax, %r11 adoxq %rbx, %r12 .elseif ((\arg1 + \arg2) % 8 == 4) adcxq %rax, %r12 adoxq %rbx, %r13 .elseif ((\arg1 + \arg2) % 8 == 5) adcxq %rax, %r13 adoxq %rbx, %r14 .elseif ((\arg1 + \arg2) % 8 == 6) adcxq %rax, %r14 adoxq %rbx, %r15 .elseif ((\arg1 + \arg2) % 8 == 7) adcxq %rax, %r15 adoxq %rbx, %r8 .endif .endm .macro adurowa arg1 movq w, %rdx // Get the word-level modular inverse xorl %eax, %eax // Get a known flag state .if (\arg1 % 8 == 0) mulxq %r8, %rdx, %rax .elseif (\arg1 % 8 == 1) mulxq %r9, %rdx, %rax .elseif (\arg1 % 8 == 2) mulxq %r10, %rdx, %rax .elseif (\arg1 % 8 == 3) mulxq %r11, %rdx, %rax .elseif (\arg1 % 8 == 4) mulxq %r12, %rdx, %rax .elseif (\arg1 % 8 == 5) mulxq %r13, %rdx, %rax .elseif (\arg1 % 8 == 6) mulxq %r14, %rdx, %rax .elseif (\arg1 % 8 == 7) mulxq %r15, %rdx, %rax .endif movq %rdx, 8*\arg1(z) // Store multiplier word mulpadda 0, \arg1 // Note that the bottom reg of the window is zero by construction // So it's safe just to use "mulpadda 7" here mulpadda 1, \arg1 mulpadda 2, \arg1 mulpadda 3, \arg1 mulpadda 4, \arg1 mulpadda 5, \arg1 mulpadda 6, \arg1 mulpadda 7, \arg1 // window lowest = 0 beforehand by construction .if (\arg1 % 8 == 0) adcq $0, %r8 .elseif (\arg1 % 8 == 1) adcq $0, %r9 .elseif (\arg1 % 8 == 2) adcq $0, %r10 .elseif (\arg1 % 8 == 3) adcq $0, %r11 .elseif (\arg1 % 8 == 4) adcq $0, %r12 .elseif (\arg1 % 8 == 5) adcq $0, %r13 .elseif (\arg1 % 8 == 6) adcq $0, %r14 .elseif (\arg1 % 8 == 7) adcq $0, %r15 .endif .endm .macro adurowza movq w, %rdx // Get the word-level modular inverse xorl %eax, %eax // Get a known flag state movq (z), %r8 // %r8 = zeroth word mulxq %r8, %rdx, %rax // Compute multiplier word movq %rdx, (z) // Store multiplier word movq 8(z), %r9 mulpadda 0, 0 movq 16(z), %r10 mulpadda 1, 0 movq 24(z), %r11 mulpadda 2, 0 movq 32(z), %r12 mulpadda 3, 0 movq 40(z), %r13 mulpadda 4, 0 movq 48(z), %r14 mulpadda 5, 0 movq 56(z), %r15 mulpadda 6, 0 mulpadda 7, 0 // r8 = 0 beforehand by construction adcq $0, %r8 .endm // ----------------------------------------------------------------------------- // Hybrid top, doing an 8 block specially then multiple additional 8 blocks // ----------------------------------------------------------------------------- // Multiply-add: z := z + x[i...i+7] * m .macro addrows adurowza adurowa 1 adurowa 2 adurowa 3 adurowa 4 adurowa 5 adurowa 6 adurowa 7 movq z, zz movq k8m1, %rax testq %rax, %rax jz Lbignum_emontredc_8n_innerend movq %rax, innercount Lbignum_emontredc_8n_innerloop: addq $64, zz addq $64, m addrow 0 addrow 1 addrow 2 addrow 3 addrow 4 addrow 5 addrow 6 addrow 7 subq $64, innercount jnz Lbignum_emontredc_8n_innerloop movq 
k8m1, %rax Lbignum_emontredc_8n_innerend: subq %rax, m movq carry, %rbx negq %rbx adcq %r8, 64(z,%rax,1) adcq %r9, 72(z,%rax,1) adcq %r10, 80(z,%rax,1) adcq %r11, 88(z,%rax,1) adcq %r12, 96(z,%rax,1) adcq %r13, 104(z,%rax,1) adcq %r14, 112(z,%rax,1) adcq %r15, 120(z,%rax,1) movl $0, %eax adcq $0, %rax movq %rax, carry .endm // ----------------------------------------------------------------------------- // Main code. // ----------------------------------------------------------------------------- S2N_BN_SYMBOL(bignum_emontredc_8n): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx #endif // Save more registers to play with CFI_PUSH(%rbp) CFI_PUSH(%rbx) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Pre-initialize the return value to 0 just in case of early exit below xorl %eax, %eax // Divide the input k by 8, and push k8m1 = (k/8 - 1)<<6 which is used as // the scaled inner loop counter / pointer adjustment repeatedly. Also push // k/8 itself which is here initializing the outer loop count. shrq $3, %rdi jz Lbignum_emontredc_8n_end leaq -1(%rdi), %rbx shlq $6, %rbx CFI_PUSH(%rbx) CFI_PUSH(%rdi) // Make space for two more variables, and set between-stages carry to 0 CFI_DEC_RSP(16) movq $0, carry // Copy m into its main home movq %rdx, m // Now just systematically add in the rows Lbignum_emontredc_8n_outerloop: addrows addq $64, z subq $1, outercount jnz Lbignum_emontredc_8n_outerloop // Pop the carry-out "p", which was stored at [%rsp], put in %rax for return CFI_POP(%rax) // Adjust the stack CFI_INC_RSP(24) // Rest of the epilogue Lbignum_emontredc_8n_end: CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbx) CFI_POP(%rbp) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_emontredc_8n) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
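The w argument is described above as the word-level modular inverse used to zero out the low word at each step. As a sketch (assuming the same convention as bignum_emontredc, i.e. w*m[0] == -1 (mod 2^64)), the standard Newton iteration for deriving it from an odd modulus word is:

#include <stdint.h>

// Sketch: compute w with w*m == -1 (mod 2^64) for odd m by Newton's
// iteration; each step doubles the number of correct low bits.
uint64_t negmodinv64(uint64_t m) {
    uint64_t x = m;              // inverse of m mod 2^3 (m*m == 1 mod 8)
    for (int i = 0; i < 5; i++)  // 3 -> 6 -> 12 -> 24 -> 48 -> 96 bits
        x *= 2 - m * x;
    return ~x + 1;               // negate, so that (~x + 1) * m == -1
}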
wlsfx/bnbb
2,928
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_mod_n384_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_384
// Input x[6]; output z[6]
//
//    extern void bignum_mod_n384_6(uint64_t z[static 6], const uint64_t x[static 6]);
//
// Reduction is modulo the group order of the NIST curve P-384.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n384_6)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n384_6)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n384_6)
        .text

#define z %rdi
#define x %rsi

#define d0 %rdx
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11
#define a %rax

// Re-use the input pointer as a temporary once we're done

#define c %rsi

S2N_BN_SYMBOL(bignum_mod_n384_6):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Load the input and compute x + (2^384 - n_384)

        movq    $0x1313e695333ad68d, a
        movq    (x), d0
        addq    a, d0
        movq    $0xa7e5f24db74f5885, d1
        adcq    8(x), d1
        movq    $0x389cb27e0bc8d220, d2
        adcq    16(x), d2
        movq    24(x), d3
        adcq    $0, d3
        movq    32(x), d4
        adcq    $0, d4
        movq    40(x), d5
        adcq    $0, d5

// Now CF is set iff 2^384 <= x + (2^384 - n_384), i.e. iff n_384 <= x.
// Create a mask for the condition x < n. We now want to subtract the
// masked (2^384 - n_384), but because we're running out of registers
// without using a save-restore sequence, we need some contortions.
// Create the lowest digit (re-using a kept from above)

        sbbq    c, c
        notq    c
        andq    c, a

// Do the first digit of addition and writeback

        subq    a, d0
        movq    d0, (z)

// Preserve carry chain and do the next digit

        sbbq    d0, d0
        movq    $0xa7e5f24db74f5885, a
        andq    c, a
        negq    d0
        sbbq    a, d1
        movq    d1, 8(z)

// Preserve carry chain once more and do remaining digits

        sbbq    d0, d0
        movq    $0x389cb27e0bc8d220, a
        andq    c, a
        negq    d0
        sbbq    a, d2
        movq    d2, 16(z)
        sbbq    $0, d3
        movq    d3, 24(z)
        sbbq    $0, d4
        movq    d4, 32(z)
        sbbq    $0, d5
        movq    d5, 40(z)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_n384_6)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
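The add-then-masked-subtract pattern above is the usual constant-time conditional reduction. A one-word C analog of the same masking idea (function name ours; the real code works over six words with a carried chain):

#include <stdint.h>

// One-word analog of the reduction above, for 0 <= x < 2*n with n > 0:
// trial-subtract n, then add it back under a mask when we went negative.
uint64_t mod_once(uint64_t x, uint64_t n) {
    uint64_t t = x - n;                     // x + (2^64 - n) mod 2^64
    uint64_t mask = (uint64_t)0 - (x < n);  // all-1s exactly when x < n
    return t + (n & mask);
}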
wlsfx/bnbb
3,066
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_sub_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Subtract modulo p_384, z := (x - y) mod p_384
// Inputs x[6], y[6]; output z[6]
//
//    extern void bignum_sub_p384(uint64_t z[static 6], const uint64_t x[static 6],
//                                const uint64_t y[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = x, RDX = y
// Microsoft x64 ABI:   RCX = z, RDX = x, R8 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_p384)
        .text

#define z %rdi
#define x %rsi
#define y %rdx

#define d0 %rax
#define d1 %rcx
#define d2 %r8
#define d3 %r9
#define d4 %r10
#define d5 %r11

// Re-use the input pointers as temporaries once we're done

#define a %rsi
#define c %rdx

#define ashort %esi

S2N_BN_SYMBOL(bignum_sub_p384):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

// Subtract the inputs as [d5;d4;d3;d2;d1;d0] = x - y (modulo 2^384)
// Capture the top carry as a bitmask for the condition x < y

        movq    (x), d0
        subq    (y), d0
        movq    8(x), d1
        sbbq    8(y), d1
        movq    16(x), d2
        sbbq    16(y), d2
        movq    24(x), d3
        sbbq    24(y), d3
        movq    32(x), d4
        sbbq    32(y), d4
        movq    40(x), d5
        sbbq    40(y), d5
        sbbq    c, c

// Use mask to make r' = mask * (2^384 - p_384) for a compensating subtraction
// of r_384 = 2^384 - p_384, equivalent to an addition of p_384.
// We don't quite have enough ABI-modifiable registers to create all three
// nonzero digits of r while maintaining d0..d5, but make the first two now.

        movl    $0x00000000ffffffff, ashort
        andq    a, c            // c = masked 0x00000000ffffffff
        xorq    a, a
        subq    c, a            // a = masked 0xffffffff00000001

// Do the first two digits of addition and writeback

        subq    a, d0
        movq    d0, (z)
        sbbq    c, d1
        movq    d1, 8(z)

// Preserve the carry chain while creating the extra masked digit since
// the logical operation will clear CF

        sbbq    d0, d0
        andq    a, c            // c = masked 0x0000000000000001
        negq    d0

// Do the rest of the addition and writeback

        sbbq    c, d2
        movq    d2, 16(z)
        sbbq    $0, d3
        movq    d3, 24(z)
        sbbq    $0, d4
        movq    d4, 32(z)
        sbbq    $0, d5
        movq    d5, 40(z)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sub_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
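In portable C, the same subtract-then-masked-add-back scheme might be sketched as follows (assuming unsigned __int128; the limb table spells out p_384's word pattern, which matches the masked constants in the code above):

#include <stdint.h>

// Sketch of (x - y) mod p for 6-limb operands: subtract with borrow,
// then add p back masked by the final borrow (i.e. only when x < y).
static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

void sub_mod_p384_sketch(uint64_t z[6], const uint64_t x[6], const uint64_t y[6]) {
    uint64_t b = 0;                                // running borrow
    for (int i = 0; i < 6; i++) {
        unsigned __int128 d = (unsigned __int128)x[i] - y[i] - b;
        z[i] = (uint64_t)d;
        b = (uint64_t)(d >> 64) & 1;               // 1 if we borrowed
    }
    uint64_t mask = (uint64_t)0 - b;               // all-1s iff x < y
    unsigned __int128 c = 0;
    for (int i = 0; i < 6; i++) {                  // masked add of p_384
        c += (unsigned __int128)z[i] + (P384[i] & mask);
        z[i] = (uint64_t)c;
        c >>= 64;
    }
}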
wlsfx/bnbb
5,444
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_mod_n384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo group order, z := x mod n_384 // Input x[k]; output z[6] // // extern void bignum_mod_n384(uint64_t z[static 6], uint64_t k, // const uint64_t *x); // // Reduction is modulo the group order of the NIST curve P-384. // // Standard x86-64 ABI: RDI = z, RSI = k, RDX = x // Microsoft x64 ABI: RCX = z, RDX = k, R8 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n384) .text #define z %rdi #define k %rsi #define x %rcx #define m0 %r8 #define m1 %r9 #define m2 %r10 #define m3 %r11 #define m4 %r12 #define m5 %r13 #define d %r14 #define n0 %rax #define n1 %rbx #define n2 %rdx #define q %rdx #define n0short %eax #define qshort %edx S2N_BN_SYMBOL(bignum_mod_n384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save extra registers CFI_PUSH(%rbx) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) // If the input is already <= 5 words long, go to a trivial "copy" path cmpq $6, k jc Lbignum_mod_n384_shortinput // Otherwise load the top 6 digits (top-down) and reduce k by 6 subq $6, k movq 40(%rdx,k,8), m5 movq 32(%rdx,k,8), m4 movq 24(%rdx,k,8), m3 movq 16(%rdx,k,8), m2 movq 8(%rdx,k,8), m1 movq (%rdx,k,8), m0 // Move x into another register to leave %rdx free for multiplies and use of n2 movq %rdx, x // Reduce the top 6 digits mod n_384 (a conditional subtraction of n_384) movq $0x1313e695333ad68d, n0 movq $0xa7e5f24db74f5885, n1 movq $0x389cb27e0bc8d220, n2 addq n0, m0 adcq n1, m1 adcq n2, m2 adcq $0, m3 adcq $0, m4 adcq $0, m5 sbbq d, d notq d andq d, n0 andq d, n1 andq d, n2 subq n0, m0 sbbq n1, m1 sbbq n2, m2 sbbq $0, m3 sbbq $0, m4 sbbq $0, m5 // Now do (k-6) iterations of 7->6 word modular reduction testq k, k jz Lbignum_mod_n384_writeback Lbignum_mod_n384_loop: // Compute q = min (m5 + 1) (2^64 - 1) movl $1, qshort addq m5, q sbbq d, d orq d, q // Load the next digit so current m to reduce = [m5;m4;m3;m2;m1;m0;d] movq -8(x,k,8), d // Now form [m5;m4;m3;m2;m1;m0;d] = m - q * n_384 subq q, m5 xorq n0, n0 movq $0x1313e695333ad68d, n0 mulxq n0, n0, n1 adcxq n0, d adoxq n1, m0 movq $0xa7e5f24db74f5885, n0 mulxq n0, n0, n1 adcxq n0, m0 adoxq n1, m1 movq $0x389cb27e0bc8d220, n0 mulxq n0, n0, n1 adcxq n0, m1 movl $0, n0short adoxq n0, n1 adcxq n1, m2 adcq $0, m3 adcq $0, m4 adcq $0, m5 // Now our top word m5 is either zero or all 1s. 
Use it for a masked // addition of n_384, which we can do by a *subtraction* of // 2^384 - n_384 from our portion movq $0x1313e695333ad68d, n0 andq m5, n0 movq $0xa7e5f24db74f5885, n1 andq m5, n1 movq $0x389cb27e0bc8d220, n2 andq m5, n2 subq n0, d sbbq n1, m0 sbbq n2, m1 sbbq $0, m2 sbbq $0, m3 sbbq $0, m4 // Now shuffle registers up and loop movq m4, m5 movq m3, m4 movq m2, m3 movq m1, m2 movq m0, m1 movq d, m0 decq k jnz Lbignum_mod_n384_loop // Write back Lbignum_mod_n384_writeback: movq m0, (z) movq m1, 8(z) movq m2, 16(z) movq m3, 24(z) movq m4, 32(z) movq m5, 40(z) // Restore registers and return CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mod_n384) Lbignum_mod_n384_shortinput: xorq m0, m0 xorq m1, m1 xorq m2, m2 xorq m3, m3 xorq m4, m4 xorq m5, m5 testq k, k jz Lbignum_mod_n384_writeback movq (%rdx), m0 decq k jz Lbignum_mod_n384_writeback movq 8(%rdx), m1 decq k jz Lbignum_mod_n384_writeback movq 16(%rdx), m2 decq k jz Lbignum_mod_n384_writeback movq 24(%rdx), m3 decq k jz Lbignum_mod_n384_writeback movq 32(%rdx), m4 jmp Lbignum_mod_n384_writeback #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
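The quotient estimate at the top of the loop, q = min(m5 + 1, 2^64 - 1), is computed branch-free with an add/sbb/or sequence; a C rendering of exactly that trick (function name ours):

#include <stdint.h>

// q = min(h + 1, 2^64 - 1): add 1, detect the carry-out, and OR in an
// all-ones mask when it happened, matching the add/sbb/or above.
uint64_t quotient_estimate(uint64_t h) {
    uint64_t q = h + 1;
    uint64_t mask = (uint64_t)0 - (q == 0);  // carry out iff h was 2^64-1
    return q | mask;
}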
wlsfx/bnbb
5,617
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_mod_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo field characteristic, z := x mod p_384 // Input x[k]; output z[6] // // extern void bignum_mod_p384_alt(uint64_t z[static 6], uint64_t k, // const uint64_t *x); // // Standard x86-64 ABI: RDI = z, RSI = k, RDX = x // Microsoft x64 ABI: RCX = z, RDX = k, R8 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_alt) .text #define z %rdi #define k %rsi #define x %rcx #define m0 %r8 #define m1 %r9 #define m2 %r10 #define m3 %r11 #define m4 %r12 #define m5 %r13 #define d %r14 #define n0 %rax #define n1 %rbx #define n2 %rdx // Both alias n1 #define q %rbx #define c %rbx #define n0short %eax #define n1short %ebx #define qshort %ebx S2N_BN_SYMBOL(bignum_mod_p384_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save extra registers CFI_PUSH(%rbx) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) // If the input is already <= 5 words long, go to a trivial "copy" path cmpq $6, k jc Lbignum_mod_p384_alt_shortinput // Otherwise load the top 6 digits (top-down) and reduce k by 6 subq $6, k movq 40(%rdx,k,8), m5 movq 32(%rdx,k,8), m4 movq 24(%rdx,k,8), m3 movq 16(%rdx,k,8), m2 movq 8(%rdx,k,8), m1 movq (%rdx,k,8), m0 // Move x into another register to leave %rdx free for multiplies and use of n2 movq %rdx, x // Reduce the top 6 digits mod p_384 (a conditional subtraction of p_384) movl $0x00000000ffffffff, n0short movq $0xffffffff00000000, n1 movq $0xfffffffffffffffe, n2 subq n0, m0 sbbq n1, m1 sbbq n2, m2 sbbq $-1, m3 sbbq $-1, m4 sbbq $-1, m5 sbbq d, d andq d, n0 andq d, n1 andq d, n2 addq n0, m0 adcq n1, m1 adcq n2, m2 adcq d, m3 adcq d, m4 adcq d, m5 // Now do (k-6) iterations of 7->6 word modular reduction testq k, k jz Lbignum_mod_p384_alt_writeback Lbignum_mod_p384_alt_loop: // Compute q = min (m5 + 1) (2^64 - 1) movl $1, qshort addq m5, q sbbq d, d orq d, q // Load the next digit so current m to reduce = [m5;m4;m3;m2;m1;m0;d] movq -8(x,k,8), d // Now form [m5;m4;m3;m2;m1;m0;d] = m - q * p_384. To use an addition for // the main calculation we do (m - 2^384 * q) + q * (2^384 - p_384) // where 2^384 - p_384 = [0;0;0;1;0x00000000ffffffff;0xffffffff00000001]. // The extra subtraction of 2^384 * q is the first instruction. subq q, m5 movq $0xffffffff00000001, %rax mulq q addq %rax, d adcq %rdx, m0 adcq q, m1 movq q, %rax sbbq c, c movl $0x00000000ffffffff, %edx negq c mulq %rdx addq %rax, m0 adcq %rdx, m1 adcq c, m2 adcq $0, m3 adcq $0, m4 adcq $0, m5 // Now our top word m5 is either zero or all 1s. 
Use it for a masked // addition of p_384, which we can do by a *subtraction* of // 2^384 - p_384 from our portion movq $0xffffffff00000001, n0 andq m5, n0 movl $0x00000000ffffffff, n1short andq m5, n1 andq $1, m5 subq n0, d sbbq n1, m0 sbbq m5, m1 sbbq $0, m2 sbbq $0, m3 sbbq $0, m4 // Now shuffle registers up and loop movq m4, m5 movq m3, m4 movq m2, m3 movq m1, m2 movq m0, m1 movq d, m0 decq k jnz Lbignum_mod_p384_alt_loop // Write back Lbignum_mod_p384_alt_writeback: movq m0, (z) movq m1, 8(z) movq m2, 16(z) movq m3, 24(z) movq m4, 32(z) movq m5, 40(z) // Restore registers and return CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mod_p384_alt) Lbignum_mod_p384_alt_shortinput: xorq m0, m0 xorq m1, m1 xorq m2, m2 xorq m3, m3 xorq m4, m4 xorq m5, m5 testq k, k jz Lbignum_mod_p384_alt_writeback movq (%rdx), m0 decq k jz Lbignum_mod_p384_alt_writeback movq 8(%rdx), m1 decq k jz Lbignum_mod_p384_alt_writeback movq 16(%rdx), m2 decq k jz Lbignum_mod_p384_alt_writeback movq 24(%rdx), m3 decq k jz Lbignum_mod_p384_alt_writeback movq 32(%rdx), m4 jmp Lbignum_mod_p384_alt_writeback #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
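A quick C check of the complement constants this file relies on: adding r = 2^384 - p_384 to p_384 must wrap to zero in every limb with a final carry out (a standalone sketch; the limb values are the ones visible in the code above):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t p[6] = {0x00000000ffffffffULL, 0xffffffff00000000ULL,
                           0xfffffffffffffffeULL, ~0ULL, ~0ULL, ~0ULL};
    const uint64_t r[6] = {0xffffffff00000001ULL, 0x00000000ffffffffULL,
                           1ULL, 0, 0, 0};
    unsigned __int128 c = 0;
    for (int i = 0; i < 6; i++) {
        c += (unsigned __int128)p[i] + r[i];
        if ((uint64_t)c != 0) { printf("mismatch at limb %d\n", i); return 1; }
        c >>= 64;
    }
    printf("p_384 + r == 2^384: %s\n", c == 1 ? "yes" : "no");
    return 0;
}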
wlsfx/bnbb
9,677
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_montmul_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^384) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_montmul_p384_alt(uint64_t z[static 6], // const uint64_t x[static 6], // const uint64_t y[static 6]); // // Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y // satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in // the "usual" case x < p_384 and y < p_384). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ----------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384_alt) .text #define z %rdi #define x %rsi // We move the y argument here so we can use %rdx for multipliers #define y %rcx // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %rcx #define w %rbx // Add %rbx * m into a register-pair (high,low) maintaining consistent // carry-catching with carry (negated, as bitmask) and using %rax and %rdx // as temporaries #define mulpadd(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // Initial version assuming no carry-in #define mulpadi(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // End version not catching the top carry-out #define mulpade(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high // Core one-step Montgomery reduction macro. Takes input in // [d7;d6;d5;d4;d3;d2;d1;d0] and returns result in [d7;d6;d5;d4;d3;d2;d1], // adding to the existing contents, re-using d0 as a temporary internally // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montredc(d7,d6,d5,d4,d3,d2,d1,d0) // // This particular variant, with its mix of addition and subtraction // at the top, is not intended to maintain a coherent carry or borrow out. // It is assumed the final result would fit in [d7;d6;d5;d4;d3;d2;d1]. 
// which is always the case here as the top word is even always in {0,1} #define montredc(d7,d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rbx ; \ shlq $32, %rbx ; \ addq d0, %rbx ; \ /* Construct [%rbp;%rdx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, d0 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq d0, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rdx, d2 ; \ sbbq %rbp, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ sbbq $0, %rbx ; \ addq %rbx, d6 ; \ adcq $0, d7 S2N_BN_SYMBOL(bignum_montmul_p384_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save more registers to play with CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Copy y into a safe register to start with movq %rdx, y // Do row 0 computation, which is a bit different: // set up initial window [%r14,%r13,%r12,%r11,%r10,%r9,%r8] = y[0] * x // Unlike later, we only need a single carry chain movq (y), %rbx movq (x), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 8(x), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 16(x), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 24(x), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 32(x), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 40(x), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d // Montgomery reduce the zeroth window montredc(%r15, %r14,%r13,%r12,%r11,%r10,%r9,%r8) // Add row 1 movq 8(y), %rbx mulpadi(%r8,%r10,%r9,(x)) mulpadd(%r8,%r11,%r10,8(x)) mulpadd(%r8,%r12,%r11,16(x)) mulpadd(%r8,%r13,%r12,24(x)) mulpadd(%r8,%r14,%r13,32(x)) mulpadd(%r8,%r15,%r14,40(x)) negq %r8 // Montgomery reduce window 1 montredc(%r8, %r15,%r14,%r13,%r12,%r11,%r10,%r9) // Add row 2 movq 16(y), %rbx mulpadi(%r9,%r11,%r10,(x)) mulpadd(%r9,%r12,%r11,8(x)) mulpadd(%r9,%r13,%r12,16(x)) mulpadd(%r9,%r14,%r13,24(x)) mulpadd(%r9,%r15,%r14,32(x)) mulpadd(%r9,%r8,%r15,40(x)) negq %r9 // Montgomery reduce window 2 montredc(%r9, %r8,%r15,%r14,%r13,%r12,%r11,%r10) // Add row 3 movq 24(y), %rbx mulpadi(%r10,%r12,%r11,(x)) mulpadd(%r10,%r13,%r12,8(x)) mulpadd(%r10,%r14,%r13,16(x)) mulpadd(%r10,%r15,%r14,24(x)) mulpadd(%r10,%r8,%r15,32(x)) mulpadd(%r10,%r9,%r8,40(x)) negq %r10 // Montgomery reduce window 3 montredc(%r10, %r9,%r8,%r15,%r14,%r13,%r12,%r11) // Add row 4 movq 32(y), %rbx mulpadi(%r11,%r13,%r12,(x)) mulpadd(%r11,%r14,%r13,8(x)) mulpadd(%r11,%r15,%r14,16(x)) mulpadd(%r11,%r8,%r15,24(x)) mulpadd(%r11,%r9,%r8,32(x)) mulpadd(%r11,%r10,%r9,40(x)) negq %r11 // Montgomery reduce window 4 montredc(%r11, %r10,%r9,%r8,%r15,%r14,%r13,%r12) // Add row 5 movq 40(y), %rbx mulpadi(%r12,%r14,%r13,(x)) mulpadd(%r12,%r15,%r14,8(x)) mulpadd(%r12,%r8,%r15,16(x)) mulpadd(%r12,%r9,%r8,24(x)) mulpadd(%r12,%r10,%r9,32(x)) mulpadd(%r12,%r11,%r10,40(x)) negq %r12 // Montgomery reduce window 5 montredc(%r12, %r11,%r10,%r9,%r8,%r15,%r14,%r13) // We now have a pre-reduced 7-word form z = [%r12; %r11;%r10;%r9;%r8;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r12; %r13;%rbp;%rdx;%rcx;%rbx;%rax] = z + (2^384 - p_384) xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movq 
$0xffffffff00000001, %rax addq %r14, %rax movl $0x00000000ffffffff, %ebx adcq %r15, %rbx movl $0x0000000000000001, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0, %r12 // ~ZF <=> %r12 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. cmovnzq %rax, %r14 cmovnzq %rbx, %r15 cmovnzq %rcx, %r8 cmovnzq %rdx, %r9 cmovnzq %rbp, %r10 cmovnzq %r13, %r11 // Write back the result movq %r14, (z) movq %r15, 8(z) movq %r8, 16(z) movq %r9, 24(z) movq %r10, 32(z) movq %r11, 40(z) // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p384_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
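The montredc macro's multiplier w = [d0 + (d0<<32)] mod 2^64 works because p_384 == 2^32 - 1 (mod 2^64) and (2^32 - 1)*(2^32 + 1) = 2^64 - 1 == -1 (mod 2^64), so 2^32 + 1 acts as the negated word-level inverse of p_384. A small C verification of both facts (the sample d0 is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t p0 = 0x00000000ffffffffULL;   // low limb of p_384
    uint64_t w1 = (1ULL << 32) + 1;        // candidate -1/p_384 mod 2^64
    printf("p0 * (2^32+1) mod 2^64 = 0x%016llx (expect all f's)\n",
           (unsigned long long)(p0 * w1));
    uint64_t d0 = 0x123456789abcdef0ULL;   // arbitrary sample word
    uint64_t w = d0 + (d0 << 32);          // the macro's multiplier
    printf("d0 + w*p0 mod 2^64 = 0x%016llx (expect 0)\n",
           (unsigned long long)(d0 + w * p0));
    return 0;
}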
wlsfx/bnbb
96,847
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_montinv_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery inverse modulo p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 // Input x[6]; output z[6] // // extern void bignum_montinv_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // If the 6-digit input x is coprime to p_384, i.e. is not divisible // by it, returns z < p_384 such that x * z == 2^768 (mod p_384). This // is effectively "Montgomery inverse" because if we consider x and z as // Montgomery forms of X and Z, i.e. x == 2^384 * X and z == 2^384 * Z // (both mod p_384) then X * Z == 1 (mod p_384). That is, this function // gives the analog of the modular inverse bignum_inv_p384 but with both // input and output in the Montgomery domain. Note that x does not need // to be reduced modulo p_384, but the output always is. If the input // is divisible (i.e. is 0 or p_384), then there can be no solution to // the congruence x * z == 2^768 (mod p_384), and z = 0 is returned. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montinv_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montinv_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montinv_p384) .text // Size in bytes of a 64-bit word #define N 8 // Pointer-offset pairs for temporaries on stack // The u and v variables are 6 words each as expected, but the f and g // variables are 8 words each -- they need to have at least one extra // word for a sign word, and to preserve alignment we "round up" to 8. // In fact, we currently keep an extra word in u and v as well. #define f 0(%rsp) #define g (8*N)(%rsp) #define u (16*N)(%rsp) #define v (24*N)(%rsp) #define tmp (32*N)(%rsp) #define tmp2 (33*N)(%rsp) #define i (34*N)(%rsp) #define d (35*N)(%rsp) #define mat (36*N)(%rsp) // Backup for the input pointer #define res (40*N)(%rsp) // Total size to reserve on the stack #define NSPACE 42*N // Syntactic variants to make x86_att version simpler to generate #define F 0 #define G (8*N) #define U (16*N) #define V (24*N) #define MAT (36*N) #define ff (%rsp) #define gg (8*N)(%rsp) // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro from P[6..0] to P[5..0]. // --------------------------------------------------------------------------- #define amontred(P) \ /* We only know the input is -2^444 < x < 2^444. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_384. 
*/ \ movq $0xe000000000000000, %r8 ; \ xorl %eax, %eax ; \ addq P, %r8 ; \ movq $0x000000001fffffff, %r9 ; \ leaq -1(%rax), %rax ; \ adcq N+P, %r9 ; \ movq $0xdfffffffe0000000, %r10 ; \ adcq 2*N+P, %r10 ; \ movq 3*N+P, %r11 ; \ adcq %rax, %r11 ; \ movq 4*N+P, %r12 ; \ adcq %rax, %r12 ; \ movq 5*N+P, %r13 ; \ adcq %rax, %r13 ; \ movq $0x1fffffffffffffff, %r14 ; \ adcq 6*N+P, %r14 ; \ /* Correction multiplier is %rbx = w = [d0 + (d0<<32)] mod 2^64 */ \ movq %r8, %rbx ; \ shlq $32, %rbx ; \ addq %r8, %rbx ; \ /* Construct [%rbp;%rdx;%rax;-] = (2^384 - p_384) * w */ \ /* We know lowest word will cancel so can re-use %r8 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq %r8, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w, catching carry in %rax */ \ subq %rax, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0, %r12 ; \ sbbq $0, %r13 ; \ sbbq $0, %r14 ; \ sbbq %rax, %rax ; \ addq %rbx, %r14 ; \ adcq $0, %rax ; \ /* Now if top is nonzero we subtract p_384 (almost-Montgomery) */ \ negq %rax; \ movq $0x00000000ffffffff, %rbx ; \ andq %rax, %rbx ; \ movq $0xffffffff00000000, %rcx ; \ andq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rdx ; \ andq %rax, %rdx ; \ subq %rbx, %r9 ; \ movq %r9, P ; \ sbbq %rcx, %r10 ; \ movq %r10, N+P ; \ sbbq %rdx, %r11 ; \ movq %r11, 2*N+P ; \ sbbq %rax, %r12 ; \ movq %r12, 3*N+P ; \ sbbq %rax, %r13 ; \ movq %r13, 4*N+P ; \ sbbq %rax, %r14 ; \ movq %r14, 5*N+P // Very similar to a subroutine call to the s2n-bignum word_divstep59. // But different in register usage and returning the final matrix as // // [ %r8 %r10] // [ %r12 %r14] // // and also returning the matrix still negated (which doesn't matter) #define divstep59(din,fin,gin) \ movq din, %rsi ; \ movq fin, %rdx ; \ movq gin, %rcx ; \ movq %rdx, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ xorl %ebp, %ebp ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, 
%rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, 
%rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %rdx ; \ leaq (%rcx,%rax), %rdi ; \ shlq $0x16, %rdx ; \ shlq $0x16, %rdi ; \ sarq $0x2b, %rdx ; \ sarq $0x2b, %rdi ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %rbx ; \ leaq (%rcx,%rax), %rcx ; \ sarq $0x2a, %rbx ; \ sarq $0x2a, %rcx ; \ movq %rdx, MAT(%rsp) ; \ movq %rbx, MAT+0x8(%rsp) ; \ movq %rdi, MAT+0x10(%rsp) ; \ movq %rcx, MAT+0x18(%rsp) ; \ movq fin, %r12 ; \ imulq %r12, %rdi ; \ imulq %rdx, %r12 ; \ movq gin, %r13 ; \ imulq %r13, %rbx ; \ imulq %rcx, %r13 ; \ addq %rbx, %r12 ; \ addq %rdi, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq 
$1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r10 ; \ shlq $0x16, %r8 ; \ shlq $0x16, %r10 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r10 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r15 ; \ leaq (%rcx,%rax), %r11 ; \ sarq $0x2a, %r15 ; \ sarq $0x2a, %r11 ; \ movq %r13, %rbx ; \ movq %r12, %rcx ; \ imulq %r8, %r12 ; \ imulq %r15, %rbx ; \ addq %rbx, %r12 ; \ imulq %r11, %r13 ; \ imulq %r10, %rcx ; \ addq 
%rcx, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq MAT(%rsp), %rax ; \ imulq %r8, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r15, %rdx ; \ imulq MAT+0x8(%rsp), %r8 ; \ imulq MAT+0x18(%rsp), %r15 ; \ addq %r8, %r15 ; \ leaq (%rax,%rdx), %r9 ; \ movq MAT(%rsp), %rax ; \ imulq %r10, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r11, %rdx ; \ imulq MAT+0x8(%rsp), %r10 ; \ imulq MAT+0x18(%rsp), %r11 ; \ addq %r10, %r11 ; \ leaq (%rax,%rdx), %r13 ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ 
xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r12 ; \ shlq $0x15, %r8 ; \ shlq $0x15, %r12 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r12 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r10 ; \ leaq (%rcx,%rax), %r14 ; \ sarq $0x2b, %r10 ; \ sarq $0x2b, %r14 ; \ movq %r9, %rax ; \ imulq %r8, %rax ; \ movq %r13, %rdx ; \ imulq %r10, %rdx ; \ imulq %r15, %r8 ; \ imulq %r11, %r10 ; \ addq %r8, %r10 ; \ leaq (%rax,%rdx), %r8 ; \ movq %r9, %rax ; \ imulq %r12, %rax ; \ movq %r13, %rdx ; \ imulq %r14, %rdx ; \ imulq %r15, %r12 ; \ imulq %r11, %r14 ; \ addq %r12, %r14 ; \ leaq (%rax,%rdx), %r12 S2N_BN_SYMBOL(bignum_montinv_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Save registers and make room for temporaries CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) CFI_DEC_RSP(NSPACE) // Save the return pointer for the end so we can overwrite %rdi later movq %rdi, res // Copy the constant p_384 into f including the 7th zero digit movl $0xffffffff, %eax movq %rax, F(%rsp) movq %rax, %rbx notq %rbx movq %rbx, F+N(%rsp) xorl %ebp, %ebp leaq -2(%rbp), %rcx movq %rcx, F+2*N(%rsp) leaq -1(%rbp), %rdx movq %rdx, F+3*N(%rsp) movq %rdx, 
F+4*N(%rsp) movq %rdx, F+5*N(%rsp) movq %rbp, F+6*N(%rsp) // Copy input but to g, reduced mod p_384 so that g <= f as assumed // in the divstep bound proof. movq (%rsi), %r8 subq %rax, %r8 movq N(%rsi), %r9 sbbq %rbx, %r9 movq 2*N(%rsi), %r10 sbbq %rcx, %r10 movq 3*N(%rsi), %r11 sbbq %rdx, %r11 movq 4*N(%rsi), %r12 sbbq %rdx, %r12 movq 5*N(%rsi), %r13 sbbq %rdx, %r13 cmovcq (%rsi), %r8 cmovcq N(%rsi), %r9 cmovcq 2*N(%rsi), %r10 cmovcq 3*N(%rsi), %r11 cmovcq 4*N(%rsi), %r12 cmovcq 5*N(%rsi), %r13 movq %r8, G(%rsp) movq %r9, G+N(%rsp) movq %r10, G+2*N(%rsp) movq %r11, G+3*N(%rsp) movq %r12, G+4*N(%rsp) movq %r13, G+5*N(%rsp) movq %rbp, G+6*N(%rsp) // Also maintain reduced < 2^384 vector [u,v] such that // [f,g] == x * 2^{5*i-843} * [u,v] (mod p_384) // starting with [p_384,x] == x * 2^{5*0-843} * [0,2^843] (mod p_384) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. // After the 15th and last iteration and sign adjustment, when // f == 1 for in-scope cases, we have x * 2^{75-843} * u == 1, i.e. // x * u == 2^768 as required. xorl %eax, %eax movq %rax, U(%rsp) movq %rax, U+N(%rsp) movq %rax, U+2*N(%rsp) movq %rax, U+3*N(%rsp) movq %rax, U+4*N(%rsp) movq %rax, U+5*N(%rsp) // The starting constant 2^843 mod p_384 is // 0x0000000000000800:00001000000007ff:fffff00000000000 // :00001000000007ff:fffff00000000800:0000000000000000 // where colons separate 64-bit subwords, least significant at the right. // These are constructed dynamically to reduce large constant loads. movq %rax, V(%rsp) movq $0xfffff00000000800, %rcx movq %rcx, V+N(%rsp) movq $0x00001000000007ff, %rdx movq %rdx, V+2*N(%rsp) btr $11, %rcx movq %rcx, V+3*N(%rsp) movq %rdx, V+4*N(%rsp) bts $11, %rax movq %rax, V+5*N(%rsp) // Start of main loop. We jump into the middle so that the divstep // portion is common to the special fifteenth iteration after a uniform // first 14. movq $15, i movq $1, d jmp Lbignum_montinv_p384_midloop Lbignum_montinv_p384_loop: // Separate out the matrix into sign-magnitude pairs movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in temporary storage for the [u,v] part and do [f,g] first. movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, tmp movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, tmp2 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. 
// // Digit 0 of [f,g] xorl %ebx, %ebx movq F(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq F(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq G(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp // Digit 1 of [f,g] xorl %ecx, %ecx movq F+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq G+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, F(%rsp) xorl %edi, %edi movq F+N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq G+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, G(%rsp) // Digit 2 of [f,g] xorl %esi, %esi movq F+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq G+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, F+N(%rsp) xorl %ebx, %ebx movq F+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq G+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, G+N(%rsp) // Digit 3 of [f,g] xorl %ebp, %ebp movq F+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq G+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $59, %rsi, %rcx movq %rcx, F+2*N(%rsp) xorl %ecx, %ecx movq F+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbx adcq %rdx, %rcx movq G+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, G+2*N(%rsp) // Digit 4 of [f,g] xorl %edi, %edi movq F+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbp adcq %rdx, %rdi movq G+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, F+3*N(%rsp) xorl %esi, %esi movq F+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rcx adcq %rdx, %rsi movq G+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, G+3*N(%rsp) // Digits 5 and 6 of [f,g] movq F+5*N(%rsp), %rax xorq %r9, %rax movq F+6*N(%rsp), %rbx xorq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G+5*N(%rsp), %rax xorq %r11, %rax movq G+6*N(%rsp), %rdx xorq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, F+4*N(%rsp) shrdq $59, %rbx, %rdi sarq $59, %rbx movq F+5*N(%rsp), %rax movq %rdi, F+5*N(%rsp) movq F+6*N(%rsp), %rdi movq %rbx, F+6*N(%rsp) xorq %r13, %rax xorq %r13, %rdi andq %r12, %rdi negq %rdi mulq %r12 addq %rax, %rsi adcq %rdx, %rdi movq G+5*N(%rsp), %rax xorq %r15, %rax movq G+6*N(%rsp), %rdx xorq %r15, %rdx andq %r14, %rdx subq %rdx, %rdi mulq %r14 addq %rax, %rsi adcq %rdx, %rdi shrdq $59, %rsi, %rcx movq %rcx, G+4*N(%rsp) shrdq $59, %rdi, %rsi movq %rsi, G+5*N(%rsp) sarq $59, %rdi movq %rdi, G+6*N(%rsp) // Get the initial carries back from storage and do the [u,v] accumulation movq tmp, %rbx movq tmp2, %rbp // Digit 0 of [u,v] xorl %ecx, %ecx movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi 
movq %rbp, V(%rsp) // Digit 1 of [u,v] xorl %ebx, %ebx movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+N(%rsp) // Digit 2 of [u,v] xorl %ecx, %ecx movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+2*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+2*N(%rsp) // Digit 3 of [u,v] xorl %ebx, %ebx movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+3*N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+3*N(%rsp) // Digit 4 of [u,v] xorl %ecx, %ecx movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+4*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+4*N(%rsp) // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx // Preload for last use of old u digit 3 movq U+5*N(%rsp), %rax movq %rcx, U+5*N(%rsp) movq %rdx, U+6*N(%rsp) // Digits 5 and 6 of v (top is unsigned) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq V+5*N(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rsi, V+5*N(%rsp) movq %rdx, V+6*N(%rsp) // Montgomery reduction of u amontred(u) // Montgomery reduction of v amontred(v) Lbignum_montinv_p384_midloop: divstep59(d,ff,gg) movq %rsi, d // Next iteration decq i jnz Lbignum_montinv_p384_loop // The 15th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. movq F(%rsp), %rax movq G(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $63, %rax // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * 2^{-768} [u,v] (mod p_384) // we want to flip the sign of u according to that of f. 
movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 // Adjust the initial value to allow for complement instead of negation movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 // Digit 0 of [u] xorl %r13d, %r13d movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 movq %r12, U(%rsp) adcq %rdx, %r13 // Digit 1 of [u] xorl %r14d, %r14d movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 movq %r13, U+N(%rsp) adcq %rdx, %r14 // Digit 2 of [u] xorl %r15d, %r15d movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+2*N(%rsp) adcq %rdx, %r15 // Digit 3 of [u] xorl %r14d, %r14d movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r15 adcq %rdx, %r14 movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r15 movq %r15, U+3*N(%rsp) adcq %rdx, %r14 // Digit 4 of [u] xorl %r15d, %r15d movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+4*N(%rsp) adcq %rdx, %r15 // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 movq %r15, U+5*N(%rsp) adcq %rdx, %r9 movq %r9, U+6*N(%rsp) // Montgomery reduce u amontred(u) // Perform final strict reduction mod p_384 and copy to output movl $0xffffffff, %eax movq %rax, %rbx notq %rbx xorl %ebp, %ebp leaq -2(%rbp), %rcx leaq -1(%rbp), %rdx movq U(%rsp), %r8 subq %rax, %r8 movq U+N(%rsp), %r9 sbbq %rbx, %r9 movq U+2*N(%rsp), %r10 sbbq %rcx, %r10 movq U+3*N(%rsp), %r11 sbbq %rdx, %r11 movq U+4*N(%rsp), %r12 sbbq %rdx, %r12 movq U+5*N(%rsp), %r13 sbbq %rdx, %r13 cmovcq U(%rsp), %r8 cmovcq U+N(%rsp), %r9 cmovcq U+2*N(%rsp), %r10 cmovcq U+3*N(%rsp), %r11 cmovcq U+4*N(%rsp), %r12 cmovcq U+5*N(%rsp), %r13 movq res, %rdi movq %r8, (%rdi) movq %r9, N(%rdi) movq %r10, 2*N(%rdi) movq %r11, 3*N(%rdi) movq %r12, 4*N(%rdi) movq %r13, 5*N(%rdi) // Restore stack and registers CFI_INC_RSP(NSPACE) CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montinv_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
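// ----------------------------------------------------------------------------
// A minimal Python sketch of the functional specification stated in the
// header (x * z == 2^768 (mod p_384) for invertible x, z = 0 otherwise),
// together with a check that the dynamically constructed start value for v
// matches the word list given in the comment above for 2^843 mod p_384:
//
//     p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
//
//     def montinv_p384_ref(x):
//         if x % p384 == 0:
//             return 0                        # no solution; return zero
//         return pow(x, -1, p384) * pow(2, 768, p384) % p384
//
//     words = [0x0000000000000000, 0xfffff00000000800, 0x00001000000007ff,
//              0xfffff00000000000, 0x00001000000007ff, 0x0000000000000800]
//     assert sum(w << (64 * i) for i, w in enumerate(words)) == pow(2, 843, p384)
// ----------------------------------------------------------------------------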
wlsfx/bnbb
47,867
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjmixadd(uint64_t p3[static 18], // const uint64_t p1[static 18], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2 // Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjmixadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjmixadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjmixadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Pointer-offset pairs for inputs and outputs // These assume %rdi = p3, %rsi = p1 and %rcx = p2, // which needs to be set up explicitly before use. // However the %rdi value never changes. #define x_1 0(%rsi) #define y_1 NUMSIZE(%rsi) #define z_1 (2*NUMSIZE)(%rsi) #define x_2 0(%rcx) #define y_2 NUMSIZE(%rcx) #define x_3 0(%rdi) #define y_3 NUMSIZE(%rdi) #define z_3 (2*NUMSIZE)(%rdi) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define zp2 (NUMSIZE*0)(%rsp) #define ww (NUMSIZE*0)(%rsp) #define resx (NUMSIZE*0)(%rsp) #define yd (NUMSIZE*1)(%rsp) #define y2a (NUMSIZE*1)(%rsp) #define x2a (NUMSIZE*2)(%rsp) #define zzx2 (NUMSIZE*2)(%rsp) #define zz (NUMSIZE*3)(%rsp) #define t1 (NUMSIZE*3)(%rsp) #define t2 (NUMSIZE*4)(%rsp) #define zzx1 (NUMSIZE*4)(%rsp) #define resy (NUMSIZE*4)(%rsp) #define xd (NUMSIZE*5)(%rsp) #define resz (NUMSIZE*5)(%rsp) // Temporaries for the actual input pointers #define input_x (NUMSIZE*6)(%rsp) #define input_y (NUMSIZE*6+8)(%rsp) #define NSPACE 304 // Corresponds exactly to bignum_montmul_p384 #define montmul_p384(P0,P1,P2) \ movq P2, %rdx ; \ xorl %r15d, %r15d ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rbx, %r10 ; \ addq %rbx, %r9 ; \ mulxq 0x10+P1, %rbx, %r11 ; \ adcq %rbx, %r10 ; \ mulxq 0x18+P1, %rbx, %r12 ; \ adcq %rbx, %r11 ; \ mulxq 0x20+P1, %rbx, %r13 ; \ adcq %rbx, %r12 ; \ mulxq 0x28+P1, %rbx, %r14 ; \ adcq %rbx, %r13 ; \ adcq %r15, %r14 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r8, %rbx ; \ adcq %r8, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x8+P2, %rdx ; \ xorl %r8d, %r8d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ adoxq %r8, %r15 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r14 
; \ adcq %rbx, %r15 ; \ adcq %r8, %r8 ; \ movq %r9, %rdx ; \ shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r9, %rbx ; \ adcq %r9, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq %rbp, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r15 ; \ adcq $0x0, %r8 ; \ movq 0x10+P2, %rdx ; \ xorl %r9d, %r9d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adoxq %r9, %r8 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r15 ; \ adcq %rbx, %r8 ; \ adcq %r9, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r10, %rbx ; \ adcq %r10, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq %rbp, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r8 ; \ adcq $0x0, %r9 ; \ movq 0x18+P2, %rdx ; \ xorl %r10d, %r10d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ adoxq %r10, %r9 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r8 ; \ adcq %rbx, %r9 ; \ adcq %r10, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r11, %rbx ; \ adcq %r11, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq %rbp, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r9 ; \ adcq $0x0, %r10 ; \ movq 0x20+P2, %rdx ; \ xorl %r11d, %r11d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ adoxq %r11, %r10 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r9 ; \ adcq %rbx, %r10 ; \ adcq %r11, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r12, %rbx ; \ adcq %r12, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r13 ; \ sbbq %rbx, %r14 ; \ sbbq %rbp, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ movq 0x28+P2, %rdx ; \ xorl %r12d, %r12d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq 0x20+P1, 
%rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ adoxq %r12, %r11 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r10 ; \ adcq %rbx, %r11 ; \ adcq %r12, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r13, %rbx ; \ adcq %r13, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r14 ; \ sbbq %rbx, %r15 ; \ sbbq %rbp, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorl %edx, %edx ; \ xorl %ebp, %ebp ; \ xorl %r13d, %r13d ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %ebx ; \ adcq %r15, %rbx ; \ movl $0x1, %ecx ; \ adcq %r8, %rcx ; \ adcq %r9, %rdx ; \ adcq %r10, %rbp ; \ adcq %r11, %r13 ; \ adcq $0x0, %r12 ; \ cmovne %rax, %r14 ; \ cmovne %rbx, %r15 ; \ cmovne %rcx, %r8 ; \ cmovne %rdx, %r9 ; \ cmovne %rbp, %r10 ; \ cmovne %r13, %r11 ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %r8, 0x10+P0 ; \ movq %r9, 0x18+P0 ; \ movq %r10, 0x20+P0 ; \ movq %r11, 0x28+P0 // Corresponds exactly to bignum_montsqr_p384 #define montsqr_p384(P0,P1) \ movq P1, %rdx ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ mulxq 0x28+P1, %r13, %r14 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x20+P1, %r15, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x10+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ movq 0x8+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adcxq %rbp, %r15 ; \ adoxq %rbp, %rcx ; \ adcq %rbp, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x20+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x28+P1, %rax, %rdx ; \ adcxq %rax, %r15 ; \ adoxq %rdx, %rcx ; \ movq 0x28+P1, %rdx ; \ mulxq 0x20+P1, %rbx, %rbp ; \ mulxq 0x18+P1, %rax, %rdx ; \ adcxq %rax, %rcx ; \ adoxq %rdx, %rbx ; \ movl $0x0, %eax ; \ adcxq %rax, %rbx ; \ adoxq %rax, %rbp ; \ adcq %rax, %rbp ; \ xorq %rax, %rax ; \ movq P1, %rdx ; \ mulxq P1, %r8, %rax ; \ adcxq %r9, %r9 ; \ adoxq %rax, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %r15, %r15 ; \ adoxq %rdx, %r15 ; \ movq 0x20+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %rcx, %rcx ; \ adoxq %rax, %rcx ; \ adcxq %rbx, %rbx ; \ adoxq %rdx, %rbx ; \ movq 0x28+P1, %rdx ; \ mulxq %rdx, %rax, %rsi ; \ adcxq %rbp, %rbp ; \ adoxq %rax, %rbp ; \ movl $0x0, %eax ; \ adcxq %rax, %rsi ; \ adoxq %rax, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r8, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r8 ; \ addq %rbx, %rax ; \ adcq %rdx, %r8 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r9 ; \ sbbq %r8, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq 
%rdx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rdx ; \ shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r9, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r9 ; \ addq %rbx, %rax ; \ adcq %rdx, %r9 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r10 ; \ sbbq %r9, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rdx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r10, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r10 ; \ addq %rbx, %rax ; \ adcq %rdx, %r10 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r11 ; \ sbbq %r10, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rdx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r11, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r11 ; \ addq %rbx, %rax ; \ adcq %rdx, %r11 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r12 ; \ sbbq %r11, %r13 ; \ sbbq %rbx, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ movq %rdx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r12, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r12 ; \ addq %rbx, %rax ; \ adcq %rdx, %r12 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r13 ; \ sbbq %r12, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rdx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r13, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r13 ; \ addq %rbx, %rax ; \ adcq %rdx, %r13 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r8 ; \ sbbq %r13, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rdx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ adcq %r8, %r8 ; \ xorq %r11, %r11 ; \ xorq %r12, %r12 ; \ xorq %r13, %r13 ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %r9d ; \ adcq %r15, %r9 ; \ movl $0x1, %r10d ; \ adcq %rcx, %r10 ; \ adcq %rbx, %r11 ; \ adcq %rbp, %r12 ; \ adcq %rsi, %r13 ; \ adcq $0x0, %r8 ; \ cmovne %rax, %r14 ; \ cmovne %r9, %r15 ; \ cmovne %r10, %rcx ; \ cmovne %r11, %rbx ; \ cmovne %r12, %rbp ; \ cmovne %r13, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Almost-Montgomery variant which we use when an input to other muls // with the other argument fully reduced (which is always safe). 
#define amontsqr_p384(P0,P1) \ movq P1, %rdx ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ mulxq 0x28+P1, %r13, %r14 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x20+P1, %r15, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x10+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ movq 0x8+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adcxq %rbp, %r15 ; \ adoxq %rbp, %rcx ; \ adcq %rbp, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x20+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x28+P1, %rax, %rdx ; \ adcxq %rax, %r15 ; \ adoxq %rdx, %rcx ; \ movq 0x28+P1, %rdx ; \ mulxq 0x20+P1, %rbx, %rbp ; \ mulxq 0x18+P1, %rax, %rdx ; \ adcxq %rax, %rcx ; \ adoxq %rdx, %rbx ; \ movl $0x0, %eax ; \ adcxq %rax, %rbx ; \ adoxq %rax, %rbp ; \ adcq %rax, %rbp ; \ xorq %rax, %rax ; \ movq P1, %rdx ; \ mulxq P1, %r8, %rax ; \ adcxq %r9, %r9 ; \ adoxq %rax, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %r15, %r15 ; \ adoxq %rdx, %r15 ; \ movq 0x20+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %rcx, %rcx ; \ adoxq %rax, %rcx ; \ adcxq %rbx, %rbx ; \ adoxq %rdx, %rbx ; \ movq 0x28+P1, %rdx ; \ mulxq %rdx, %rax, %rsi ; \ adcxq %rbp, %rbp ; \ adoxq %rax, %rbp ; \ movl $0x0, %eax ; \ adcxq %rax, %rsi ; \ adoxq %rax, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r8, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r8 ; \ addq %rbx, %rax ; \ adcq %rdx, %r8 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r9 ; \ sbbq %r8, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rdx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rdx ; \ shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r9, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r9 ; \ addq %rbx, %rax ; \ adcq %rdx, %r9 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r10 ; \ sbbq %r9, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rdx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r10, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r10 ; \ addq %rbx, %rax ; \ adcq %rdx, %r10 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r11 ; \ sbbq %r10, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rdx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r11, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r11 ; \ addq %rbx, %rax ; \ adcq %rdx, %r11 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r12 ; \ sbbq %r11, %r13 ; \ sbbq %rbx, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, 
%r10 ; \ movq %rdx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r12, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r12 ; \ addq %rbx, %rax ; \ adcq %rdx, %r12 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r13 ; \ sbbq %r12, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rdx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r13, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r13 ; \ addq %rbx, %rax ; \ adcq %rdx, %r13 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r8 ; \ sbbq %r13, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rdx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ movq $0xffffffff00000001, %rax ; \ movl $0xffffffff, %r9d ; \ movl $0x1, %r10d ; \ cmovnc %r8, %rax ; \ cmovnc %r8, %r9 ; \ cmovnc %r8, %r10 ; \ addq %rax, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r8, %rbx ; \ adcq %r8, %rbp ; \ adcq %r8, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ movq P1, %rax ; \ subq P2, %rax ; \ movq 0x8+P1, %rdx ; \ sbbq 0x8+P2, %rdx ; \ movq 0x10+P1, %r8 ; \ sbbq 0x10+P2, %r8 ; \ movq 0x18+P1, %r9 ; \ sbbq 0x18+P2, %r9 ; \ movq 0x20+P1, %r10 ; \ sbbq 0x20+P2, %r10 ; \ movq 0x28+P1, %r11 ; \ sbbq 0x28+P2, %r11 ; \ sbbq %rcx, %rcx ; \ movl $0xffffffff, %esi ; \ andq %rsi, %rcx ; \ xorq %rsi, %rsi ; \ subq %rcx, %rsi ; \ subq %rsi, %rax ; \ movq %rax, P0 ; \ sbbq %rcx, %rdx ; \ movq %rdx, 0x8+P0 ; \ sbbq %rax, %rax ; \ andq %rsi, %rcx ; \ negq %rax; \ sbbq %rcx, %r8 ; \ movq %r8, 0x10+P0 ; \ sbbq $0x0, %r9 ; \ movq %r9, 0x18+P0 ; \ sbbq $0x0, %r10 ; \ movq %r10, 0x20+P0 ; \ sbbq $0x0, %r11 ; \ movq %r11, 0x28+P0 // Additional macros to help with final multiplexing #define testzero6(P) \ movq P, %rax ; \ movq 8+P, %rdx ; \ orq 16+P, %rax ; \ orq 24+P, %rdx ; \ orq 32+P, %rax ; \ orq 40+P, %rdx ; \ orq %rdx, %rax #define mux6(r0,r1,r2,r3,r4,r5,PNE,PEQ) \ movq PEQ, %rax ; \ movq PNE, r0 ; \ cmovzq %rax, r0 ; \ movq 8+PEQ, %rax ; \ movq 8+PNE, r1 ; \ cmovzq %rax, r1 ; \ movq 16+PEQ, %rax ; \ movq 16+PNE, r2 ; \ cmovzq %rax, r2 ; \ movq 24+PEQ, %rax ; \ movq 24+PNE, r3 ; \ cmovzq %rax, r3 ; \ movq 32+PEQ, %rax ; \ movq 32+PNE, r4 ; \ cmovzq %rax, r4 ; \ movq 40+PEQ, %rax ; \ movq 40+PNE, r5 ; \ cmovzq %rax, r5 #define load6(r0,r1,r2,r3,r4,r5,P) \ movq P, r0 ; \ movq 8+P, r1 ; \ movq 16+P, r2 ; \ movq 24+P, r3 ; \ movq 32+P, r4 ; \ movq 40+P, r5 #define store6(P,r0,r1,r2,r3,r4,r5) \ movq r0, P ; \ movq r1, 8+P ; \ movq r2, 16+P ; \ movq r3, 24+P ; \ movq r4, 32+P ; \ movq r5, 40+P S2N_BN_SYMBOL(p384_montjmixadd): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers and make room on stack for temporary variables // Put the input arguments in non-volatile places on the stack CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) CFI_DEC_RSP(NSPACE) movq %rsi, input_x movq %rdx, input_y // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract amontsqr_p384(zp2,z_1) 
movq input_x, %rsi movq input_y, %rcx montmul_p384(y2a,z_1,y_2) movq input_y, %rcx montmul_p384(x2a,zp2,x_2) montmul_p384(y2a,zp2,y2a) movq input_x, %rsi sub_p384(xd,x2a,x_1) movq input_x, %rsi sub_p384(yd,y2a,y_1) amontsqr_p384(zz,xd) montsqr_p384(ww,yd) movq input_x, %rsi montmul_p384(zzx1,zz,x_1) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) movq input_x, %rsi montmul_p384(resz,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) movq input_x, %rsi montmul_p384(t1,t1,y_1) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) movq input_x, %rsi testzero6(z_1) // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^384 - p_384), // hence giving 0 + p2 = p2 for the final result. movq input_y, %rcx mux6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_2) mux6(%r12,%r13,%r14,%r15,%rdx,%rcx,resy,y_2) store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp) store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rcx) load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) movq $0xffffffff00000001, %rax cmovzq %rax, %r8 movl $0x00000000ffffffff, %eax cmovzq %rax, %r9 movq $1, %rax cmovzq %rax, %r10 movl $0, %eax cmovzq %rax, %r11 cmovzq %rax, %rbx cmovzq %rax, %rbp store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp) // Restore stack and registers CFI_INC_RSP(NSPACE) CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjmixadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
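The z = 1 constant written back in the p1 = 0 branch is worth a sanity check: the Montgomery form of 1 for P-384 is 2^384 mod p_384 = 2^384 - p_384, and its three nonzero words are exactly the immediates fed to the cmovz chain. A minimal Python check (an editor's illustration, not part of the imported sources; the names are ad hoc):

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

# Words loaded into z_3 in the p1 = 0 case, least-significant first
words = [0xffffffff00000001, 0x00000000ffffffff, 1, 0, 0, 0]
value = sum(w << (64 * i) for i, w in enumerate(words))

# The Montgomery form of 1 is 2^384 mod p_384; since p_384 < 2^384 < 2*p_384
# this is exactly 2^384 - p_384
assert value == 2**384 % p384 == 2**384 - p384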
wlsfx/bnbb
3,747
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_triple_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Triple modulo p_384, z := (3 * x) mod p_384 // Input x[6]; output z[6] // // extern void bignum_triple_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // The input x can be any 6-digit bignum, not necessarily reduced modulo p_384, // and the result is always fully reduced, i.e. z = (3 * x) mod p_384. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p384) .text #define z %rdi #define x %rsi #define d0 %r8 #define d1 %r9 #define d2 %r10 #define d3 %r11 #define d4 %rbx #define d5 %rsi #define a %rax #define c %rcx #define q %rdx #define ashort %eax #define qshort %edx S2N_BN_SYMBOL(bignum_triple_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // We seem to need (just!) one extra register, which we need to save and restore CFI_PUSH(%rbx) // Multiply, accumulating the result as 2^384 * h + [d5;d4;d3;d2;d1;d0] // but actually immediately producing q = h + 1, our quotient approximation, // by adding 1 to it. xorl ashort, ashort movq (x), q movq q, d0 adcxq q, q adoxq q, d0 movq 8(x), q movq q, d1 adcxq q, q adoxq q, d1 movq 16(x), q movq q, d2 adcxq q, q adoxq q, d2 movq 24(x), q movq q, d3 adcxq q, q adoxq q, d3 movq 32(x), q movq q, d4 adcxq q, q adoxq q, d4 movq 40(x), q movq q, d5 adcxq q, q adoxq q, d5 movl $1, qshort adcxq a, q adoxq a, q // Initial subtraction of z - q * p_384, with bitmask c for the carry // Actually done as an addition of (z - 2^384 * h) + q * (2^384 - p_384) // which, because q = h + 1, is exactly 2^384 + (z - q * p_384), and // therefore CF <=> 2^384 + (z - q * p_384) >= 2^384 <=> z >= q * p_384. movq q, c shlq $32, c movq q, a subq c, a sbbq $0, c addq a, d0 adcq c, d1 adcq q, d2 adcq $0, d3 adcq $0, d4 adcq $0, d5 sbbq c, c notq c // Now use that mask for a masked addition of p_384, which again is in // fact done by a masked subtraction of 2^384 - p_384, so that we only // have three nonzero digits and so can avoid using another register. movl $0x00000000ffffffff, qshort xorl ashort, ashort andq c, q subq q, a negq c subq a, d0 movq d0, (z) sbbq q, d1 movq d1, 8(z) sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) // Return CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_triple_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
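The quotient-approximation trick above has a clean word-free model: writing 3*x = 2^384*h + l with h in {0,1,2}, the guess q = h + 1 satisfies -p_384 <= 3*x - q*p_384 < p_384, so one carry test plus one masked add-back of p_384 gives a fully reduced result. A small Python sketch of that logic (an editor's illustration; the function name triple_p384 here is ad hoc, not the assembly entry point):

import random

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def triple_p384(x):
    """(3 * x) mod p_384 via the quotient approximation q = h + 1."""
    assert 0 <= x < 2**384
    h, l = divmod(3 * x, 2**384)       # 3*x = 2^384*h + l with h in {0,1,2}
    q = h + 1
    s = l + q * (2**384 - p384)        # equals 2^384 + (3*x - q*p_384)
    carry, r = divmod(s, 2**384)       # carry <=> 3*x >= q*p_384
    if not carry:                      # the masked add-back of p_384
        r = (r + p384) % 2**384
    return r

for _ in range(1000):
    x = random.randrange(2**384)
    assert triple_p384(x) == 3 * x % p384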
wlsfx/bnbb
9,488
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_montsqr_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_montsqr_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is // guaranteed in particular if x < p_384 initially (the "intended" case). // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384) .text #define z %rdi #define x %rsi // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %r10 #define w %r11 // A zero register, very often #define zero %rbp #define zeroe %ebp // Add %rdx * m into a register-pair (high,low) // maintaining consistent double-carrying with adcx and adox, // using %rax and %rbx as temporaries #define mulpadd(high,low,m) \ mulxq m, %rax, %rbx ; \ adcxq %rax, low ; \ adoxq %rbx, high // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing [d5;d4;d3;d2;d1] and re-using d0 as a // temporary internally, as well as %rax, %rbx and %rdx. // It is OK for d6 and d0 to be the same register (they often are) // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montreds(d6,d5,d4,d3,d2,d1,d0) #define montreds(d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rbx;d0;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 */ \ /* and %rbx as temps. */ \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, d0, %rax ; \ movl $0x00000000ffffffff, %ebx ; \ mulxq %rbx, %rbx, d0 ; \ addq %rbx, %rax ; \ adcq %rdx, d0 ; \ movl $0, %ebx ; \ adcq %rbx, %rbx ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq d0, d2 ; \ sbbq %rbx, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ movq %rdx, d6 ; \ sbbq $0, d6 S2N_BN_SYMBOL(bignum_montsqr_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Set up an initial window [%rcx;%r15;...%r9] = [34;05;03;01] // Note that we are using %rcx as the first step past the rotating window movq (x), %rdx mulxq 8(x), %r9, %r10 mulxq 24(x), %r11, %r12 mulxq 40(x), %r13, %r14 movq 24(x), %rdx mulxq 32(x), %r15, %rcx // Clear our zero register, and also initialize the flags for the carry chain xorl zeroe, zeroe // Chain in the addition of 02 + 12 + 13 + 14 + 15 to that window // (no carry-out possible) movq 16(x), %rdx mulpadd(%r11,%r10,(x)) mulpadd(%r12,%r11,8(x)) movq 8(x), %rdx mulpadd(%r13,%r12,24(x)) mulpadd(%r14,%r13,32(x)) mulpadd(%r15,%r14,40(x)) adcxq zero, %r15 adoxq zero, %rcx adcq zero, %rcx // Again zero out the flags. 
Actually they are already cleared, but it may // help the OOO engine to decouple the two carry chains instead of waiting for the chain above xorl zeroe, zeroe // Now chain in the 04 + 23 + 24 + 25 + 35 + 45 terms // We are running out of registers in our rotating window, so we start // using %rbx (and hence need care with using mulpadd after this). Thus // our result so far is in [%rbp;%rbx;%rcx;%r15;...%r9] movq 32(x), %rdx mulpadd(%r13,%r12,(x)) movq 16(x), %rdx mulpadd(%r14,%r13,24(x)) mulpadd(%r15,%r14,32(x)) mulxq 40(x), %rax, %rdx adcxq %rax, %r15 adoxq %rdx, %rcx // First set up the last couple of spots in our window, [%rbp;%rbx] = 45 // then add the last other term 35 movq 40(x), %rdx mulxq 32(x), %rbx, %rbp mulxq 24(x), %rax, %rdx adcxq %rax, %rcx adoxq %rdx, %rbx movl $0, %eax adcxq %rax, %rbx adoxq %rax, %rbp adcq %rax, %rbp // Just for a clear fresh start for the flags; we don't use the zero xorq %rax, %rax // Double and add to the 00 + 11 + 22 + 33 + 44 + 55 terms // For one glorious moment the entire squaring result is all in the // register file as [%rsi;%rbp;%rbx;%rcx;%r15;...;%r8] // (since we've now finished with x we can re-use %rsi) movq (x), %rdx mulxq (x), %r8, %rax adcxq %r9, %r9 adoxq %rax, %r9 movq 8(x), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 16(x), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 24(x), %rdx mulxq %rdx, %rax, %rdx adcxq %r14, %r14 adoxq %rax, %r14 adcxq %r15, %r15 adoxq %rdx, %r15 movq 32(x), %rdx mulxq %rdx, %rax, %rdx adcxq %rcx, %rcx adoxq %rax, %rcx adcxq %rbx, %rbx adoxq %rdx, %rbx movq 40(x), %rdx mulxq %rdx, %rax, %rsi adcxq %rbp, %rbp adoxq %rax, %rbp movl $0, %eax adcxq %rax, %rsi adoxq %rax, %rsi // We need just *one* more register as a temp for the Montgomery steps. // Since we are writing to the z buffer anyway, make use of that to stash %rbx. movq %rbx, (z) // Montgomery reduce the %r13,...,%r8 window 6 times montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8) montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9) montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10) montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11) montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12) montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13) // Now we can safely restore %rbx before accumulating movq (z), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0, %r8d adcq %r8, %r8 // We now have a pre-reduced 7-word form z = [%r8; %rsi;%rbp;%rbx;%rcx;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r8; %r13;%r12;%r11;%r10;%r9;%rax] = z + (2^384 - p_384) xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movq $0xffffffff00000001, %rax addq %r14, %rax movl $0x00000000ffffffff, %r9d adcq %r15, %r9 movl $0x0000000000000001, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0, %r8 // ~ZF <=> %r8 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. 
cmovnzq %rax, %r14 cmovnzq %r9, %r15 cmovnzq %r10, %rcx cmovnzq %r11, %rbx cmovnzq %r12, %rbp cmovnzq %r13, %rsi // Write back the result movq %r14, (z) movq %r15, 8(z) movq %rcx, 16(z) movq %rbx, 24(z) movq %rbp, 32(z) movq %rsi, 40(z) // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
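The montreds macro relies on the identity p_384 ≡ 2^32 - 1 (mod 2^64) together with (1 + 2^32)*(2^32 - 1) = 2^64 - 1 ≡ -1 (mod 2^64): with w = (d0 + (d0 << 32)) mod 2^64 we get d0 + w*p_384 ≡ 0 (mod 2^64), so a whole bottom word cancels on each step. A minimal Python model of one such step (an editor's illustration with ad hoc names, not part of the imported sources):

import random

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
MASK64 = 2**64 - 1

def montred_step(d):
    """One word-level Montgomery reduction step: (d + w*p_384) / 2^64."""
    w = (d + (d << 32)) & MASK64       # w = [d0 + (d0<<32)] mod 2^64
    t = d + w * p384
    assert t & MASK64 == 0             # the bottom word always cancels
    return t >> 64

d = random.randrange(2**448)
d1 = montred_step(d)
assert (d1 << 64) % p384 == d % p384   # d1 == d * 2^-64 (mod p_384)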
wlsfx/bnbb
3,622
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_double_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Double modulo p_384, z := (2 * x) mod p_384, assuming x reduced // Input x[6]; output z[6] // // extern void bignum_double_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_double_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_p384) .text #define z %rdi #define x %rsi #define d0 %rdx #define d1 %rcx #define d2 %r8 #define d3 %r9 #define d4 %r10 #define d5 %r11 #define c %rax // Re-use the input pointer as a temporary once we're done #define a %rsi #define ashort %esi S2N_BN_SYMBOL(bignum_double_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Load the input and double it so that 2^384 * c + [d5;d4;d3;d2;d1;d0] = 2 * x // Could also consider using shld to decouple carries *or* combining this // and the next block into a double carry chain with ADCX and ADOX. xorq c, c movq (x), d0 addq d0, d0 movq 8(x), d1 adcq d1, d1 movq 16(x), d2 adcq d2, d2 movq 24(x), d3 adcq d3, d3 movq 32(x), d4 adcq d4, d4 movq 40(x), d5 adcq d5, d5 adcq c, c // Now subtract p_384 from 2^384 * c + [d5;d4;d3;d2;d1;d0] to get 2 * x - p_384 // This is actually done by *adding* the 7-word negation r_384 = 2^448 - p_384 // where r_384 = [-1; 0; 0; 0; 1; 0x00000000ffffffff; 0xffffffff00000001] movq $0xffffffff00000001, a addq a, d0 movl $0x00000000ffffffff, ashort adcq a, d1 adcq $1, d2 adcq $0, d3 adcq $0, d4 adcq $0, d5 adcq $-1, c // Since by hypothesis x < p_384 we know 2 * x - p_384 < 2^384, so the top // carry c actually gives us a bitmask for 2 * x - p_384 < 0, which we // now use to make r' = mask * (2^384 - p_384) for a compensating subtraction. // We don't quite have enough ABI-modifiable registers to create all three // nonzero digits of r while maintaining d0..d5, but make the first two now. andq a, c // c = masked 0x00000000ffffffff xorq a, a subq c, a // a = masked 0xffffffff00000001 // Do the first two digits of addition and writeback subq a, d0 movq d0, (z) sbbq c, d1 movq d1, 8(z) // Preserve the carry chain while creating the extra masked digit since // the logical operation will clear CF sbbq d0, d0 andq a, c // c = masked 0x0000000000000001 negq d0 // Do the rest of the addition and writeback sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_double_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
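At the integer level this function computes t = 2*x - p_384 (realized above as an addition of the 7-word negation 2^448 - p_384) and then adds p_384 back exactly when t is negative, using the top carry word as a bitmask. A compact Python model of that control flow (an editor's illustration; double_p384 here is an ad hoc name):

import random

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def double_p384(x):
    """(2 * x) mod p_384, assuming x is already reduced."""
    assert 0 <= x < p384
    t = 2 * x - p384                   # in (-p_384, p_384) since x < p_384
    return t + p384 if t < 0 else t    # the sign is the assembly's bitmask

for _ in range(1000):
    x = random.randrange(p384)
    assert double_p384(x) == 2 * x % p384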
wlsfx/bnbb
42,990
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/p384_montjmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjmixadd_alt(uint64_t p3[static 18], // const uint64_t p1[static 18], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2 // Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjmixadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjmixadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjmixadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Pointer-offset pairs for inputs and outputs // These assume %rdi = p3, %rsi = p1 and %rcx = p2, // which needs to be set up explicitly before use. // However the %rdi value never changes. #define x_1 0(%rsi) #define y_1 NUMSIZE(%rsi) #define z_1 (2*NUMSIZE)(%rsi) #define x_2 0(%rcx) #define y_2 NUMSIZE(%rcx) #define x_3 0(%rdi) #define y_3 NUMSIZE(%rdi) #define z_3 (2*NUMSIZE)(%rdi) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define zp2 (NUMSIZE*0)(%rsp) #define ww (NUMSIZE*0)(%rsp) #define resx (NUMSIZE*0)(%rsp) #define yd (NUMSIZE*1)(%rsp) #define y2a (NUMSIZE*1)(%rsp) #define x2a (NUMSIZE*2)(%rsp) #define zzx2 (NUMSIZE*2)(%rsp) #define zz (NUMSIZE*3)(%rsp) #define t1 (NUMSIZE*3)(%rsp) #define t2 (NUMSIZE*4)(%rsp) #define zzx1 (NUMSIZE*4)(%rsp) #define resy (NUMSIZE*4)(%rsp) #define xd (NUMSIZE*5)(%rsp) #define resz (NUMSIZE*5)(%rsp) // Temporaries for the actual input pointers #define input_x (NUMSIZE*6)(%rsp) #define input_y (NUMSIZE*6+8)(%rsp) #define NSPACE 304 // Corresponds exactly to bignum_montmul_p384_alt #define montmul_p384(P0,P1,P2) \ movq P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ xorl %r10d, %r10d ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ xorl %r11d, %r11d ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ xorl %r12d, %r12d ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ xorl %r13d, %r13d ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ xorl %r14d, %r14d ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ xorl %r15d, %r15d ; \ movq %r8, %rbx ; \ shlq $0x20, %rbx ; \ addq %r8, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r8, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x8+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r8, %r8 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r10 ; \ 
adcq %rdx, %r11 ; \ sbbq %r8, %r8 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r8, %r8 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r8, %r8 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r8, %r8 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r8, %r8 ; \ negq %r8; \ movq %r9, %rbx ; \ shlq $0x20, %rbx ; \ addq %r9, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r9 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r9, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r10 ; \ sbbq %rdx, %r11 ; \ sbbq %rbp, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r15 ; \ adcq $0x0, %r8 ; \ movq 0x10+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r9, %r9 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r9, %r9 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r9, %r9 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r9, %r9 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r9, %r9 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r9, %r9 ; \ negq %r9; \ movq %r10, %rbx ; \ shlq $0x20, %rbx ; \ addq %r10, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r10 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r10, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r11 ; \ sbbq %rdx, %r12 ; \ sbbq %rbp, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r8 ; \ adcq $0x0, %r9 ; \ movq 0x18+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r10, %r10 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r10, %r10 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r10, %r10 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r10, %r10 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r10, %r10 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r10, %r10 ; \ negq %r10; \ movq %r11, %rbx ; \ shlq $0x20, %rbx ; \ addq %r11, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r11 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r11, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r12 ; \ sbbq %rdx, %r13 ; \ sbbq %rbp, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r9 ; \ adcq $0x0, %r10 ; \ movq 0x20+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r11, %r11 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r11, %r11 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r11, %r11 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq 
%r11, %r11 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r11, %r11 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r11, %r11 ; \ negq %r11; \ movq %r12, %rbx ; \ shlq $0x20, %rbx ; \ addq %r12, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r12 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r12, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r13 ; \ sbbq %rdx, %r14 ; \ sbbq %rbp, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r10 ; \ adcq $0x0, %r11 ; \ movq 0x28+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r12, %r12 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r12, %r12 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r12, %r12 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r12, %r12 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r12, %r12 ; \ negq %r12; \ movq %r13, %rbx ; \ shlq $0x20, %rbx ; \ addq %r13, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r13 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r13, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r14 ; \ sbbq %rdx, %r15 ; \ sbbq %rbp, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r11 ; \ adcq $0x0, %r12 ; \ xorl %edx, %edx ; \ xorl %ebp, %ebp ; \ xorl %r13d, %r13d ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %ebx ; \ adcq %r15, %rbx ; \ movl $0x1, %ecx ; \ adcq %r8, %rcx ; \ adcq %r9, %rdx ; \ adcq %r10, %rbp ; \ adcq %r11, %r13 ; \ adcq $0x0, %r12 ; \ cmovneq %rax, %r14 ; \ cmovneq %rbx, %r15 ; \ cmovneq %rcx, %r8 ; \ cmovneq %rdx, %r9 ; \ cmovneq %rbp, %r10 ; \ cmovneq %r13, %r11 ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %r8, 0x10+P0 ; \ movq %r9, 0x18+P0 ; \ movq %r10, 0x20+P0 ; \ movq %r11, 0x28+P0 // Corresponds exactly to bignum_montsqr_p384_alt #define montsqr_p384(P0,P1) \ movq P1, %rbx ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ movq %rax, %r9 ; \ movq %rdx, %r10 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ movq %rax, %r11 ; \ movq %rdx, %r12 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ movq %rax, %r13 ; \ movq %rdx, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x20+P1; \ movq %rax, %r15 ; \ movq %rdx, %rcx ; \ movq 0x10+P1, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rbp, %rbp ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %rbp, %rbp ; \ movq 0x8+P1, %rbx ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ adcq $0x0, %rcx ; \ movq 0x20+P1, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x10+P1, %rbx ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ 
sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %rcx ; \ sbbq %rbp, %rbp ; \ xorl %ebx, %ebx ; \ movq 0x18+P1, %rax ; \ mulq 0x28+P1; \ subq %rbp, %rdx ; \ xorl %ebp, %ebp ; \ addq %rax, %rcx ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ movq 0x20+P1, %rax ; \ mulq 0x28+P1; \ addq %rax, %rbx ; \ adcq %rdx, %rbp ; \ xorl %r8d, %r8d ; \ addq %r9, %r9 ; \ adcq %r10, %r10 ; \ adcq %r11, %r11 ; \ adcq %r12, %r12 ; \ adcq %r13, %r13 ; \ adcq %r14, %r14 ; \ adcq %r15, %r15 ; \ adcq %rcx, %rcx ; \ adcq %rbx, %rbx ; \ adcq %rbp, %rbp ; \ adcl %r8d, %r8d ; \ movq P1, %rax ; \ mulq %rax; \ movq %r8, P0 ; \ movq %rax, %r8 ; \ movq 0x8+P1, %rax ; \ movq %rbp, 0x8+P0 ; \ addq %rdx, %r9 ; \ sbbq %rbp, %rbp ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rbp, %rbp ; \ movq 0x10+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x18+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %rcx ; \ adcq %rdx, %rbx ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq 0x8+P0, %rax ; \ adcq P0, %rdx ; \ movq %rax, %rbp ; \ movq %rdx, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rbx ; \ shlq $0x20, %rbx ; \ addq %r8, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r8 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r8, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rax, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rbx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rbx ; \ shlq $0x20, %rbx ; \ addq %r9, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r9 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r9 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r9, %r10 ; \ sbbq %rdx, %r11 ; \ sbbq %rax, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rbx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rbx ; \ shlq $0x20, %rbx ; \ addq %r10, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r10 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r10, %r11 ; \ sbbq %rdx, %r12 ; \ sbbq %rax, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rbx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rbx ; \ shlq $0x20, %rbx ; \ addq %r11, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r11 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r11 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r11, %r12 ; \ sbbq %rdx, %r13 ; \ sbbq %rax, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ movq %rbx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rbx ; \ shlq $0x20, %rbx ; \ addq %r12, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r12 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r12, %r13 ; \ sbbq %rdx, %r8 ; \ sbbq %rax, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rbx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rbx ; \ shlq $0x20, %rbx ; \ addq %r13, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r13 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r13 ; \ movl 
$0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r13, %r8 ; \ sbbq %rdx, %r9 ; \ sbbq %rax, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rbx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ adcq %r8, %r8 ; \ xorq %r11, %r11 ; \ xorq %r12, %r12 ; \ xorq %r13, %r13 ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %r9d ; \ adcq %r15, %r9 ; \ movl $0x1, %r10d ; \ adcq %rcx, %r10 ; \ adcq %rbx, %r11 ; \ adcq %rbp, %r12 ; \ adcq %rsi, %r13 ; \ adcq $0x0, %r8 ; \ cmovneq %rax, %r14 ; \ cmovneq %r9, %r15 ; \ cmovneq %r10, %rcx ; \ cmovneq %r11, %rbx ; \ cmovneq %r12, %rbp ; \ cmovneq %r13, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ movq P1, %rax ; \ subq P2, %rax ; \ movq 0x8+P1, %rdx ; \ sbbq 0x8+P2, %rdx ; \ movq 0x10+P1, %r8 ; \ sbbq 0x10+P2, %r8 ; \ movq 0x18+P1, %r9 ; \ sbbq 0x18+P2, %r9 ; \ movq 0x20+P1, %r10 ; \ sbbq 0x20+P2, %r10 ; \ movq 0x28+P1, %r11 ; \ sbbq 0x28+P2, %r11 ; \ sbbq %rcx, %rcx ; \ movl $0xffffffff, %esi ; \ andq %rsi, %rcx ; \ xorq %rsi, %rsi ; \ subq %rcx, %rsi ; \ subq %rsi, %rax ; \ movq %rax, P0 ; \ sbbq %rcx, %rdx ; \ movq %rdx, 0x8+P0 ; \ sbbq %rax, %rax ; \ andq %rsi, %rcx ; \ negq %rax; \ sbbq %rcx, %r8 ; \ movq %r8, 0x10+P0 ; \ sbbq $0x0, %r9 ; \ movq %r9, 0x18+P0 ; \ sbbq $0x0, %r10 ; \ movq %r10, 0x20+P0 ; \ sbbq $0x0, %r11 ; \ movq %r11, 0x28+P0 // Additional macros to help with final multiplexing #define testzero6(P) \ movq P, %rax ; \ movq 8+P, %rdx ; \ orq 16+P, %rax ; \ orq 24+P, %rdx ; \ orq 32+P, %rax ; \ orq 40+P, %rdx ; \ orq %rdx, %rax #define mux6(r0,r1,r2,r3,r4,r5,PNE,PEQ) \ movq PEQ, %rax ; \ movq PNE, r0 ; \ cmovzq %rax, r0 ; \ movq 8+PEQ, %rax ; \ movq 8+PNE, r1 ; \ cmovzq %rax, r1 ; \ movq 16+PEQ, %rax ; \ movq 16+PNE, r2 ; \ cmovzq %rax, r2 ; \ movq 24+PEQ, %rax ; \ movq 24+PNE, r3 ; \ cmovzq %rax, r3 ; \ movq 32+PEQ, %rax ; \ movq 32+PNE, r4 ; \ cmovzq %rax, r4 ; \ movq 40+PEQ, %rax ; \ movq 40+PNE, r5 ; \ cmovzq %rax, r5 #define load6(r0,r1,r2,r3,r4,r5,P) \ movq P, r0 ; \ movq 8+P, r1 ; \ movq 16+P, r2 ; \ movq 24+P, r3 ; \ movq 32+P, r4 ; \ movq 40+P, r5 #define store6(P,r0,r1,r2,r3,r4,r5) \ movq r0, P ; \ movq r1, 8+P ; \ movq r2, 16+P ; \ movq r3, 24+P ; \ movq r4, 32+P ; \ movq r5, 40+P S2N_BN_SYMBOL(p384_montjmixadd_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers and make room on stack for temporary variables // Put the input arguments in non-volatile places on the stack CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) CFI_DEC_RSP(NSPACE) movq %rsi, input_x movq %rdx, input_y // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract montsqr_p384(zp2,z_1) movq input_x, %rsi movq input_y, %rcx montmul_p384(y2a,z_1,y_2) movq input_y, %rcx montmul_p384(x2a,zp2,x_2) montmul_p384(y2a,zp2,y2a) movq input_x, %rsi sub_p384(xd,x2a,x_1) movq input_x, %rsi sub_p384(yd,y2a,y_1) montsqr_p384(zz,xd) montsqr_p384(ww,yd) movq input_x, %rsi montmul_p384(zzx1,zz,x_1) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) movq input_x, %rsi montmul_p384(resz,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) movq 
input_x, %rsi montmul_p384(t1,t1,y_1) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) movq input_x, %rsi testzero6(z_1) // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^384 - p_384), // hence giving 0 + p2 = p2 for the final result. movq input_y, %rcx mux6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_2) mux6(%r12,%r13,%r14,%r15,%rdx,%rcx,resy,y_2) store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp) store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rcx) load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) movq $0xffffffff00000001, %rax cmovzq %rax, %r8 movl $0x00000000ffffffff, %eax cmovzq %rax, %r9 movq $1, %rax cmovzq %rax, %r10 movl $0, %eax cmovzq %rax, %r11 cmovzq %rax, %rbx cmovzq %rax, %rbp store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp) // Restore stack and registers CFI_INC_RSP(NSPACE) CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjmixadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
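For reference, the "8 * multiply + 3 * square + 7 * subtract" schedule above can be mirrored operation-for-operation in plain modular arithmetic, since the Montgomery scaling cancels out of the algebra. The sketch below is an editor's illustration with ad hoc names; like the assembly it special-cases only p1 = 0 via the z coordinate, and degenerate inputs such as p1 = p2 are not considered here:

p = 2**384 - 2**128 - 2**96 + 2**32 - 1

def jmixadd(x1, y1, z1, x2, y2):
    """Mixed Jacobian addition with implicit z2 = 1, mirroring the schedule."""
    if z1 == 0:
        return (x2, y2, 1)             # 0 + p2 = p2
    zp2 = z1 * z1 % p                  # montsqr(zp2, z_1)
    y2a = z1 * y2 % p                  # montmul(y2a, z_1, y_2)
    x2a = zp2 * x2 % p                 # montmul(x2a, zp2, x_2)
    y2a = zp2 * y2a % p                # montmul(y2a, zp2, y2a)
    xd = (x2a - x1) % p                # sub(xd, x2a, x_1)
    yd = (y2a - y1) % p                # sub(yd, y2a, y_1)
    zz = xd * xd % p                   # montsqr(zz, xd)
    ww = yd * yd % p                   # montsqr(ww, yd)
    zzx1 = zz * x1 % p                 # montmul(zzx1, zz, x_1)
    zzx2 = zz * x2a % p                # montmul(zzx2, zz, x2a)
    resx = (ww - zzx1) % p             # sub(resx, ww, zzx1)
    t1 = (zzx2 - zzx1) % p             # sub(t1, zzx2, zzx1)
    resz = xd * z1 % p                 # montmul(resz, xd, z_1)
    resx = (resx - zzx2) % p           # sub(resx, resx, zzx2)
    t2 = (zzx1 - resx) % p             # sub(t2, zzx1, resx)
    t1 = t1 * y1 % p                   # montmul(t1, t1, y_1)
    t2 = yd * t2 % p                   # montmul(t2, yd, t2)
    resy = (t2 - t1) % p               # sub(resy, t2, t1)
    return (resx, resy, resz)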
wlsfx/bnbb
9,217
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_tomont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert to Montgomery form z := (2^384 * x) mod p_384 // Input x[6]; output z[6] // // extern void bignum_tomont_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p384) .text #define z %rdi #define x %rsi // Fairly consistently used as a zero register #define zero %rbp // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %rcx #define w %rsi #define vshort %ecx #define wshort %esi // Add %rdx * m into a register-pair (high,low) // maintaining consistent double-carrying with adcx and adox, // using %rax and %rcx as temporaries #define mulpadd(high,low,m) \ mulxq m, %rax, %rcx ; \ adcxq %rax, low ; \ adoxq %rcx, high // Core one-step Montgomery reduction macro. Takes input in // [d7;d6;d5;d4;d3;d2;d1;d0] and returns result in [d7;d6;d5;d4;d3;d2;d1], // adding to the existing contents, re-using d0 as a temporary internally // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montredc(d7,d6,d5,d4,d3,d2,d1,d0) // // This particular variant, with its mix of addition and subtraction // at the top, is not intended to maintain a coherent carry or borrow out. // It is assumed the final result would fit in [d7;d6;d5;d4;d3;d2;d1], // which is always the case here as the top word is always in {0,1} #define montredc(d7,d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rbp;%rcx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rcx, %rax ; \ movl $0x00000000ffffffff, %ecx ; \ mulxq %rcx, d0, %rcx ; \ adcq d0, %rax ; \ adcq %rdx, %rcx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rcx, d2 ; \ sbbq %rbp, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ sbbq $0, %rdx ; \ addq %rdx, d6 ; \ adcq $0, d7 S2N_BN_SYMBOL(bignum_tomont_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // We are essentially just doing a Montgomery multiplication of x and the // precomputed constant y = 2^768 mod p, so the code is almost the same // modulo a few registers and the change from loading y[i] to using constants, // plus the easy digits y[4] = 1 and y[5] = 0 being treated specially. // Because there is no y pointer to keep, we use one register fewer. 
CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Do row 0 computation, which is a bit different: // set up initial window [%r14,%r13,%r12,%r11,%r10,%r9,%r8] = y[0] * x // Unlike later, we only need a single carry chain movq $0xfffffffe00000001, %rdx mulxq (x), %r8, %r9 mulxq 8(x), %rcx, %r10 addq %rcx, %r9 mulxq 16(x), %rcx, %r11 adcq %rcx, %r10 mulxq 24(x), %rcx, %r12 adcq %rcx, %r11 mulxq 32(x), %rcx, %r13 adcq %rcx, %r12 mulxq 40(x), %rcx, %r14 adcq %rcx, %r13 adcq $0, %r14 // Montgomery reduce the zeroth window xorq %r15, %r15 montredc(%r15, %r14,%r13,%r12,%r11,%r10,%r9,%r8) // Add row 1 xorq zero, zero movq $0x0000000200000000, %rdx xorq %r8, %r8 mulpadd(%r10,%r9,(x)) mulpadd(%r11,%r10,8(x)) mulpadd(%r12,%r11,16(x)) mulpadd(%r13,%r12,24(x)) mulpadd(%r14,%r13,32(x)) mulpadd(%r15,%r14,40(x)) adcxq zero, %r15 adoxq zero, %r8 adcxq zero, %r8 // Montgomery reduce window 1 montredc(%r8, %r15,%r14,%r13,%r12,%r11,%r10,%r9) // Add row 2 xorq zero, zero movq $0xfffffffe00000000, %rdx xorq %r9, %r9 mulpadd(%r11,%r10,(x)) mulpadd(%r12,%r11,8(x)) mulpadd(%r13,%r12,16(x)) mulpadd(%r14,%r13,24(x)) mulpadd(%r15,%r14,32(x)) mulpadd(%r8,%r15,40(x)) adcxq zero, %r8 adoxq zero, %r9 adcxq zero, %r9 // Montgomery reduce window 2 montredc(%r9, %r8,%r15,%r14,%r13,%r12,%r11,%r10) // Add row 3 xorq zero, zero movq $0x0000000200000000, %rdx xorq %r10, %r10 mulpadd(%r12,%r11,(x)) mulpadd(%r13,%r12,8(x)) mulpadd(%r14,%r13,16(x)) mulpadd(%r15,%r14,24(x)) mulpadd(%r8,%r15,32(x)) mulpadd(%r9,%r8,40(x)) adcxq zero, %r9 adoxq zero, %r10 adcxq zero, %r10 // Montgomery reduce window 3 montredc(%r10, %r9,%r8,%r15,%r14,%r13,%r12,%r11) // Add row 4. The multiplier y[4] = 1, so we just add x to the window // while extending it with one more digit that initially holds just this carry xorq %r11, %r11 addq (x), %r12 adcq 8(x), %r13 adcq 16(x), %r14 adcq 24(x), %r15 adcq 32(x), %r8 adcq 40(x), %r9 adcq $0, %r10 adcq $0, %r11 // Montgomery reduce window 4 montredc(%r11, %r10,%r9,%r8,%r15,%r14,%r13,%r12) // Add row 5. The multiplier y[5] = 0, so this is trivial: all we do is // bring down another zero digit into the window. xorq %r12, %r12 // Montgomery reduce window 5 montredc(%r12, %r11,%r10,%r9,%r8,%r15,%r14,%r13) // We now have a pre-reduced 7-word form [%r12;%r11;%r10;%r9;%r8;%r15;%r14] // We know, writing B = 2^{6*64}, that the full implicit result is // B^2 c <= z + (B - 1) * p < B * p + (B - 1) * p < 2 * B * p, // so the top half is certainly < 2 * p. If c = 1 already, we know // subtracting p will give the reduced modulus. But now we do a // comparison to catch cases where the residue is >= p. // First set [0;0;0;w;v;u] = 2^384 - p_384 movq $0xffffffff00000001, u movl $0x00000000ffffffff, vshort movl $0x0000000000000001, wshort // Let dd = [%r11;%r10;%r9;%r8;%r15;%r14] be the topless 6-word intermediate result. // Set CF if the addition dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384. movq %r14, d addq u, d movq %r15, d adcq v, d movq %r8, d adcq w, d movq %r9, d adcq $0, d movq %r10, d adcq $0, d movq %r11, d adcq $0, d // Now just add this new carry into the existing %r12. 
It's easy to see they // can't both be 1 by our range assumptions, so this gives us a {0,1} flag adcq $0, %r12 // Now convert it into a bitmask negq %r12 // Masked addition of 2^384 - p_384, hence subtraction of p_384 andq %r12, u andq %r12, v andq %r12, w addq u, %r14 adcq v, %r15 adcq w, %r8 adcq $0, %r9 adcq $0, %r10 adcq $0, %r11 // Write back the result movq %r14, (z) movq %r15, 8(z) movq %r8, 16(z) movq %r9, 24(z) movq %r10, 32(z) movq %r11, 40(z) // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
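The row constants above (0xfffffffe00000001, 0x0000000200000000, 0xfffffffe00000000, 0x0000000200000000, then 1 and 0) should be precisely the digits of y = 2^768 mod p_384, and Montgomery-multiplying x by y sends x to x * 2^384 mod p_384. A short Python check (an editor's illustration, not from the source tree; it uses the modular-inverse form of pow, so Python 3.8+ is assumed):

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
R = 2**384

# The six row multipliers, least-significant first
y_words = [0xfffffffe00000001, 0x0000000200000000,
           0xfffffffe00000000, 0x0000000200000000, 1, 0]
y = sum(w << (64 * i) for i, w in enumerate(y_words))
assert y == R * R % p384               # y = 2^768 mod p_384

# Montgomery multiplication by y: x * y / R == x * R (mod p_384)
x = 0x123456789abcdef
assert x * y * pow(R, -1, p384) % p384 == x * R % p384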
wlsfx/bnbb
2,974
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_mod_p384_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo field characteristic, z := x mod p_384 // Input x[6]; output z[6] // // extern void bignum_mod_p384_6(uint64_t z[static 6], const uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p384_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_6) .text #define z %rdi #define x %rsi #define d0 %rdx #define d1 %rcx #define d2 %r8 #define d3 %r9 #define d4 %r10 #define d5 %r11 #define c %rax #define cshort %eax // Re-use the input pointer as a temporary once we're done #define a %rsi S2N_BN_SYMBOL(bignum_mod_p384_6): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Load the input and subtract p_384 from it movq (x), d0 movl $0x00000000ffffffff, cshort subq c, d0 movq 8(x), d1 notq c sbbq c, d1 movq 16(x), d2 sbbq $-2, d2 movq 24(x), d3 sbbq $-1, d3 movq 32(x), d4 sbbq $-1, d4 movq 40(x), d5 sbbq $-1, d5 // Capture the top carry as a bitmask to indicate we need to add p_384 back on, // which we actually do in a more convenient way by subtracting r_384 // where r_384 = [-1; 0; 0; 0; 1; 0x00000000ffffffff; 0xffffffff00000001] // We don't quite have enough ABI-modifiable registers to create all three // nonzero digits of r while maintaining d0..d5, but make the first two now. notq c sbbq a, a andq a, c // c = masked 0x00000000ffffffff xorq a, a subq c, a // a = masked 0xffffffff00000001 // Do the first two digits of addition and writeback subq a, d0 movq d0, (z) sbbq c, d1 movq d1, 8(z) // Preserve the carry chain while creating the extra masked digit since // the logical operation will clear CF sbbq d0, d0 andq a, c // c = masked 0x0000000000000001 negq d0 // Do the rest of the addition and writeback sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mod_p384_6) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
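Since any 6-word input satisfies x < 2^384 < 2*p_384, a single trial subtraction of p_384 with a borrow-driven add-back is already a complete reduction. In Python terms (an editor's illustration; mod_p384_6 here is an ad hoc name):

import random

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def mod_p384_6(x):
    """x mod p_384 for any 6-word x."""
    assert 0 <= x < 2**384             # hence x - p_384 < p_384
    t = x - p384
    return t + p384 if t < 0 else t    # borrow drives the bitmask correction

for _ in range(1000):
    x = random.randrange(2**384)
    assert mod_p384_6(x) == x % p384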
wlsfx/bnbb
2,593
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_littleendian_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert 6-digit (384-bit) bignum to/from little-endian form // Input x[6]; output z[6] // // extern void bignum_littleendian_6(uint64_t z[static 6], // const uint64_t x[static 6]); // // The same function is given two other prototypes whose names reflect the // treatment of one or other argument as a byte array rather than word array: // // extern void bignum_fromlebytes_6(uint64_t z[static 6], // const uint8_t x[static 48]); // // extern void bignum_tolebytes_6(uint8_t z[static 48], // const uint64_t x[static 6]); // // Since x86 is little-endian, this is just copying. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_littleendian_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_littleendian_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_littleendian_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tolebytes_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tolebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tolebytes_6) .text #define z %rdi #define x %rsi #define a %rax S2N_BN_SYMBOL(bignum_littleendian_6): S2N_BN_SYMBOL(bignum_fromlebytes_6): S2N_BN_SYMBOL(bignum_tolebytes_6): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif movq (x), a movq a, (z) movq 8(x), a movq a, 8(z) movq 16(x), a movq a, 16(z) movq 24(x), a movq a, 24(z) movq 32(x), a movq a, 32(z) movq 40(x), a movq a, 40(z) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_littleendian_6) S2N_BN_SIZE_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SIZE_DIRECTIVE(bignum_tolebytes_6) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
3,861
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_triple_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Triple modulo p_384, z := (3 * x) mod p_384 // Input x[6]; output z[6] // // extern void bignum_triple_p384_alt(uint64_t z[static 6], // const uint64_t x[static 6]); // // The input x can be any 6-digit bignum, not necessarily reduced modulo p_384, // and the result is always fully reduced, i.e. z = (3 * x) mod p_384. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p384_alt) .text #define z %rdi #define x %rsi #define d0 %r8 #define d1 %r9 #define d2 %r10 #define d3 %r11 #define d4 %rbx #define d5 %rsi #define a %rax #define c %rcx #define q %rcx #define d %rdx #define ashort %eax #define cshort %ecx #define qshort %ecx #define dshort %edx S2N_BN_SYMBOL(bignum_triple_p384_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // We seem to need (just!) one extra register, which we need to save and restore CFI_PUSH(%rbx) // Multiply, accumulating the result as 2^384 * h + [d5;d4;d3;d2;d1;d0] // but actually immediately producing q = h + 1, our quotient approximation, // by adding 1 to it. movl $3, cshort movq (x), a mulq c movq a, d0 movq d, d1 movq 8(x), a xorq d2, d2 mulq c addq a, d1 adcq d, d2 movq 16(x), a xorq d3, d3 mulq c addq a, d2 adcq d, d3 movq 24(x), a xorq d4, d4 mulq c addq a, d3 adcq d, d4 movq 32(x), a mulq c addq a, d4 adcq $0, d movq 40(x), a movq d, d5 mulq c addq a, d5 movl $1, qshort adcq d, q // Initial subtraction of z - q * p_384, with bitmask c for the carry // Actually done as an addition of (z - 2^384 * h) + q * (2^384 - p_384) // which, because q = h + 1, is exactly 2^384 + (z - q * p_384), and // therefore CF <=> 2^384 + (z - q * p_384) >= 2^384 <=> z >= q * p_384. movq q, d shlq $32, d movq q, a subq d, a sbbq $0, d addq a, d0 adcq d, d1 adcq q, d2 adcq $0, d3 adcq $0, d4 adcq $0, d5 sbbq d, d notq d // Now use that mask for a masked addition of p_384, which again is in // fact done by a masked subtraction of 2^384 - p_384, so that we only // have three nonzero digits and so can avoid using another register. movl $0x00000000ffffffff, qshort xorl ashort, ashort andq d, q subq q, a negq d subq a, d0 movq d0, (z) sbbq q, d1 movq d1, 8(z) sbbq d, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) // Return CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_triple_p384_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
5,640
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_deamont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert from almost-Montgomery form, z := (x / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_deamont_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // Convert a 6-digit bignum x out of its (optionally almost) Montgomery form, // "almost" meaning any 6-digit input will work, with no range restriction. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p384) .text #define z %rdi #define x %rsi // Additional temps in the correction phase #define u %rax #define v %rcx #define w %rdx #define vshort %ecx // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing contents of [d5;d4;d3;d2;d1;d0]. This // is intended only for 6-word inputs as in mapping out of Montgomery, // not for the general case of Montgomery multiplication. It is fine // for d6 to be the same register as d0. // // Parms: montreds(d6,d5,d4,d3,d2,d1,d0) // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 #define montreds(d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rsi;%rcx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 */ \ /* as a temp. */ \ xorq %rsi, %rsi ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rcx, %rax ; \ movl $0x00000000ffffffff, %ecx ; \ mulxq %rcx, d0, %rcx ; \ adcq d0, %rax ; \ adcq %rdx, %rcx ; \ adcq $0, %rsi ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rcx, d2 ; \ sbbq %rsi, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ movq %rdx, d6 ; \ sbbq $0, d6 S2N_BN_SYMBOL(bignum_deamont_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with CFI_PUSH(%r12) CFI_PUSH(%r13) // Set up an initial window [%r13,%r12,%r11,%r10,%r9,%r8] = x movq (x), %r8 movq 8(x), %r9 movq 16(x), %r10 movq 24(x), %r11 movq 32(x), %r12 movq 40(x), %r13 // Montgomery reduce window 0 montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8) // Montgomery reduce window 1 montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9) // Montgomery reduce window 2 montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10) // Montgomery reduce window 3 montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11) // Montgomery reduce window 4 montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12) // Montgomery reduce window 5 montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13) // Do a test addition of dd = [%r13;%r12;%r11;%r10;%r9;%r8] and // 2^384 - p_384 = [0;0;0;1;v;u], hence setting CF iff // dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384. 
movq $0xffffffff00000001, u movl $0x00000000ffffffff, vshort movq %r8, w addq u, w movq %r9, w adcq v, w movq %r10, w adcq $1, w movq %r11, w adcq $0, w movq %r12, w adcq $0, w movq %r13, w adcq $0, w // Convert CF to a bitmask in w sbbq w, w // Masked addition of 2^384 - p_384, hence subtraction of p_384 andq w, u andq w, v andq $1, w addq u, %r8 adcq v, %r9 adcq w, %r10 adcq $0, %r11 adcq $0, %r12 adcq $0, %r13 // Write back the result movq %r8, (z) movq %r9, 8(z) movq %r10, 16(z) movq %r11, 24(z) movq %r12, 32(z) movq %r13, 40(z) // Restore registers and return CFI_POP(%r13) CFI_POP(%r12) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_deamont_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
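End to end, the function is six applications of the word-level reduction step followed by one comparison against p_384; each step divides by 2^64 modulo p_384, giving the overall factor 2^-384 with no range restriction on the input. A Python model (an editor's illustration with ad hoc names, not part of the imported sources):

import random

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
MASK64 = 2**64 - 1

def deamont_p384(x):
    """(x / 2^384) mod p_384 for an arbitrary 6-word x."""
    d = x
    for _ in range(6):
        w = (d + (d << 32)) & MASK64   # w * p_384 == -d (mod 2^64)
        d = (d + w * p384) >> 64       # exact division by 2^64
    if d >= p384:                      # mirrors the final masked correction
        d -= p384
    return d

for _ in range(1000):
    x = random.randrange(2**384)
    assert deamont_p384(x) * 2**384 % p384 == x % p384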
wlsfx/bnbb
10,580
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_montsqr_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_montsqr_p384_alt(uint64_t z[static 6], // const uint64_t x[static 6]); // // Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is // guaranteed in particular if x < p_384 initially (the "intended" case). // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384_alt) .text #define z %rdi #define x %rsi // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %r10 #define w %r11 // A zero register, very often #define zero %rbp #define zeroe %ebp // Add %rbx * m into a register-pair (high,low) maintaining consistent // carry-catching with carry (negated, as bitmask) and using %rax and %rdx // as temporaries #define mulpadd(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // Initial version assuming no carry-in #define mulpadi(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // End version not catching the top carry-out #define mulpade(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing [d5;d4;d3;d2;d1] and re-using d0 as a // temporary internally, as well as %rax, %rbx and %rdx. // It is OK for d6 and d0 to be the same register (they often are) // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montreds(d6,d5,d4,d3,d2,d1,d0) #define montreds(d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rbx ; \ shlq $32, %rbx ; \ addq d0, %rbx ; \ /* Construct [%rax;%rdx;d0;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 */ \ /* and %rbx as temps. 
*/ \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, d0 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq %rax, d0 ; \ movl $0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ /* Now subtract that and add 2^384 * w */ \ subq d0, d1 ; \ sbbq %rdx, d2 ; \ sbbq %rax, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ movq %rbx, d6 ; \ sbbq $0, d6 S2N_BN_SYMBOL(bignum_montsqr_p384_alt): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Set up an initial window [%rcx;%r15;...%r9] = [34;05;03;01] // Note that we are using %rcx as the first step past the rotating window movq (x), %rbx movq 8(x), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 24(x), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 40(x), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 24(x), %rax mulq 32(x) movq %rax, %r15 movq %rdx, %rcx // Chain in the addition of 02 + 12 + 13 + 14 + 15 to that window // (no carry-out possible) movq 16(x), %rbx mulpadi(%rbp,%r11,%r10,(x)) mulpadd(%rbp,%r12,%r11,8(x)) movq 8(x), %rbx mulpadd(%rbp,%r13,%r12,24(x)) mulpadd(%rbp,%r14,%r13,32(x)) mulpade(%rbp,%r15,%r14,40(x)) adcq $0, %rcx // Now chain in the 04 + 23 + 24 + 25 + 35 + 45 terms // We are running out of registers in our rotating window, so we start // using %rbx (and hence need care with using mulpadd after this). Thus // our result so far is in [%rbp;%rbx;%rcx;%r15;...%r9] movq 32(x), %rbx mulpadi(%rbp,%r13,%r12,(x)) movq 16(x), %rbx mulpadd(%rbp,%r14,%r13,24(x)) mulpadd(%rbp,%r15,%r14,32(x)) mulpadd(%rbp,%rcx,%r15,40(x)) xorl %ebx, %ebx movq 24(x), %rax mulq 40(x) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 32(x), %rax mulq 40(x) addq %rax, %rbx adcq %rdx, %rbp // Double the window as [%r8;%rbp;%rbx;%rcx;%r15;...%r9] xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d // Add the doubled window to the 00 + 11 + 22 + 33 + 44 + 55 terms // For one glorious moment the entire squaring result is all in the // register file as [%rsi;%rbp;%rbx;%rcx;%r15;...;%r8] // (since we've now finished with x we can re-use %rsi). But since // we are so close to running out of registers, we do a bit of // reshuffling and temporary storage in the output buffer. movq (x), %rax mulq %rax movq %r8, (z) movq %rax, %r8 movq 8(x), %rax movq %rbp, 8(z) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 16(x), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 24(x), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 32(x), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 40(x), %rax mulq %rax negq %rbp adcq 8(z), %rax adcq (z), %rdx movq %rax, %rbp movq %rdx, %rsi // We need just *one* more register as a temp for the Montgomery steps. // Since we are writing to the z buffer anyway, make use of that again // to stash %rbx. 
movq %rbx, (z) // Montgomery reduce the %r13,...,%r8 window 6 times montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8) montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9) montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10) montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11) montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12) montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13) // Now we can safely restore %rbx before accumulating movq (z), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0, %r8d adcq %r8, %r8 // We now have a pre-reduced 7-word form z = [%r8; %rsi;%rbp;%rbx;%rcx;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r8; %r13;%r12;%r11;%r10;%r9;%rax] = z + (2^384 - p_384) xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movq $0xffffffff00000001, %rax addq %r14, %rax movl $0x00000000ffffffff, %r9d adcq %r15, %r9 movl $0x0000000000000001, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0, %r8 // ~ZF <=> %r12 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. cmovnzq %rax, %r14 cmovnzq %r9, %r15 cmovnzq %r10, %rcx cmovnzq %r11, %rbx cmovnzq %r12, %rbp cmovnzq %r13, %rsi // Write back the result movq %r14, (z) movq %r15, 8(z) movq %rcx, 16(z) movq %rbx, 24(z) movq %rbp, 32(z) movq %rsi, 40(z) // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p384_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
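The single-word Montgomery step above rests on one identity: since p_384 == 2^32 - 1 (mod 2^64), the multiplier w = [d0 + (d0<<32)] mod 2^64 satisfies d0 + w * p_384 == 0 (mod 2^64), so adding w * p_384 annihilates the bottom word exactly and the window can rotate down by one word. A minimal Python sketch checking this identity (illustrative only, not part of the source tree):

# Verify that w = (d0 + (d0 << 32)) mod 2^64 is the per-word Montgomery
# multiplier for p_384: the bottom word of d0 + w * p_384 is always 0.
import random

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
MASK64 = 2**64 - 1

for _ in range(1000):
    d0 = random.getrandbits(64)
    w = (d0 + (d0 << 32)) & MASK64
    assert (d0 + w * P384) & MASK64 == 0

The same identity is reused by every montreds/montredc/amontred variant in the p384 files below.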
wlsfx/bnbb
3,155
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_bigendian_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert 6-digit (384-bit) bignum to/from big-endian form
// Input x[6]; output z[6]
//
//    extern void bignum_bigendian_6(uint64_t z[static 6],
//                                   const uint64_t x[static 6]);
//
// The same function is given two other prototypes whose names reflect the
// treatment of one or other argument as a byte array rather than word array:
//
//    extern void bignum_frombebytes_6(uint64_t z[static 6],
//                                     const uint8_t x[static 48]);
//
//    extern void bignum_tobebytes_6(uint8_t z[static 48],
//                                   const uint64_t x[static 6]);
//
// Since x86 is little-endian, and bignums are stored with little-endian
// word order, this is simply byte reversal and is implemented as such.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_bigendian_6)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_bigendian_6)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_bigendian_6)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_frombebytes_6)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_frombebytes_6)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_frombebytes_6)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tobebytes_6)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tobebytes_6)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tobebytes_6)
        .text

#define z %rdi
#define x %rsi
#define a %rax
#define b %rdx

// All loads and stores are word-sized, then we use BSWAP to
// reverse the byte order, as well as switching round the word order
// when writing back. The reads and writes are organized in mirror-image
// pairs (0-5, 1-4, 2-3) to allow x and z to point to the same buffer
// without using more intermediate registers.

S2N_BN_SYMBOL(bignum_bigendian_6):
S2N_BN_SYMBOL(bignum_frombebytes_6):
S2N_BN_SYMBOL(bignum_tobebytes_6):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// 0 and 5 words

        movq    (x), a
        movq    40(x), b
        bswapq  a
        bswapq  b
        movq    a, 40(z)
        movq    b, (z)

// 1 and 4 words

        movq    8(x), a
        movq    32(x), b
        bswapq  a
        bswapq  b
        movq    a, 32(z)
        movq    b, 8(z)

// 2 and 3 words

        movq    16(x), a
        movq    24(x), b
        bswapq  a
        bswapq  b
        movq    a, 24(z)
        movq    b, 16(z)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_bigendian_6)
S2N_BN_SIZE_DIRECTIVE(bignum_frombebytes_6)
S2N_BN_SIZE_DIRECTIVE(bignum_tobebytes_6)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
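Because x86 is little-endian, all three entry points are the same 48-byte reversal; a Python reference model makes the equivalence concrete (function names here are illustrative, not part of the library):

# Model of bignum_tobebytes_6 / bignum_frombebytes_6: a 6-word
# little-endian value <-> its 48-byte big-endian encoding.
def tobebytes_6(words):
    x = sum(w << (64 * i) for i, w in enumerate(words))  # LE word order
    return x.to_bytes(48, "big")

def frombebytes_6(data):
    x = int.from_bytes(data, "big")
    return [(x >> (64 * i)) & (2**64 - 1) for i in range(6)]

assert frombebytes_6(tobebytes_6([1, 2, 3, 4, 5, 6])) == [1, 2, 3, 4, 5, 6]

The mirror-image load/store pairing in the assembly is what makes the in-place case z == x safe: each pair of words is fully read before either is written back.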
wlsfx/bnbb
4,649
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_demont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^384) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
//    extern void bignum_demont_p384(uint64_t z[static 6],
//                                   const uint64_t x[static 6]);
//
// This assumes the input is < p_384 for correctness. If this is not the case,
// use the variant "bignum_deamont_p384" instead.
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p384)
        .text

#define z %rdi
#define x %rsi

// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing contents of [d5;d4;d3;d2;d1;d0]. This
// is intended only for 6-word inputs as in mapping out of Montgomery,
// not for the general case of Montgomery multiplication. It is fine
// for d6 to be the same register as d0.
//
// Parms: montreds(d6,d5,d4,d3,d2,d1,d0)
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64

#define montreds(d6,d5,d4,d3,d2,d1,d0)                          \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
        movq    d0, %rdx ;                                      \
        shlq    $32, %rdx ;                                     \
        addq    d0, %rdx ;                                      \
/* Construct [%rsi;%rcx;%rax;-] = (2^384 - p_384) * w */        \
/* We know the lowest word will cancel so we can re-use d0 */   \
/* as a temp. */                                                \
        xorq    %rsi, %rsi ;                                    \
        movq    $0xffffffff00000001, %rax ;                     \
        mulxq   %rax, %rcx, %rax ;                              \
        movl    $0x00000000ffffffff, %ecx ;                     \
        mulxq   %rcx, d0, %rcx ;                                \
        adcq    d0, %rax ;                                      \
        adcq    %rdx, %rcx ;                                    \
        adcq    $0, %rsi ;                                      \
/* Now subtract that and add 2^384 * w */                       \
        subq    %rax, d1 ;                                      \
        sbbq    %rcx, d2 ;                                      \
        sbbq    %rsi, d3 ;                                      \
        sbbq    $0, d4 ;                                        \
        sbbq    $0, d5 ;                                        \
        movq    %rdx, d6 ;                                      \
        sbbq    $0, d6

S2N_BN_SYMBOL(bignum_demont_p384):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Save more registers to play with

        CFI_PUSH(%r12)
        CFI_PUSH(%r13)

// Set up an initial window [%r13,%r12,%r11,%r10,%r9,%r8] = x

        movq    (x), %r8
        movq    8(x), %r9
        movq    16(x), %r10
        movq    24(x), %r11
        movq    32(x), %r12
        movq    40(x), %r13

// Montgomery reduce window 0

        montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8)

// Montgomery reduce window 1

        montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9)

// Montgomery reduce window 2

        montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10)

// Montgomery reduce window 3

        montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11)

// Montgomery reduce window 4

        montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12)

// Montgomery reduce window 5

        montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13)

// Write back the result

        movq    %r8, (z)
        movq    %r9, 8(z)
        movq    %r10, 16(z)
        movq    %r11, 24(z)
        movq    %r12, 32(z)
        movq    %r13, 40(z)

// Restore registers and return

        CFI_POP(%r13)
        CFI_POP(%r12)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_demont_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
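Functionally, each montreds application divides by 2^64 modulo p_384, so the six applications amount to one exact division by 2^384. A reference model in Python (a sketch, assuming Python 3.8+ for the modular-inverse form of pow):

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def demont_p384(x):
    # x assumed reduced (x < p_384), matching the function's precondition
    return (x * pow(2**384, -1, P384)) % P384

# Mapping into Montgomery form and back out is the identity:
a = 0x123456789abcdef
assert demont_p384(a * 2**384 % P384) == a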
wlsfx/bnbb
2,359
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_neg_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Negate modulo p_384, z := (-x) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
//    extern void bignum_neg_p384(uint64_t z[static 6], const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = x
// Microsoft x64 ABI:   RCX = z, RDX = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p384)
        .text

#define z %rdi
#define x %rsi

#define n0 %rax
#define n1 %rcx
#define n2 %rdx
#define n3 %r8
#define n4 %r9
#define q %r10

#define n0short %eax

S2N_BN_SYMBOL(bignum_neg_p384):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
#endif

// Or together the input digits and create a bitmask q if this is nonzero, so
// that we avoid doing -0 = p_384 and hence maintain strict modular reduction

        movq    (x), n0
        orq     8(x), n0
        movq    16(x), n1
        orq     24(x), n1
        movq    32(x), n2
        orq     40(x), n2
        orq     n1, n0
        orq     n2, n0
        negq    n0
        sbbq    q, q

// Let [q;n4;n3;n2;n1;n0] = if q then p_384 else 0

        movl    $0x00000000ffffffff, n0short
        andq    q, n0
        movq    $0xffffffff00000000, n1
        andq    q, n1
        movq    $0xfffffffffffffffe, n2
        andq    q, n2
        movq    q, n3
        movq    q, n4

// Do the subtraction

        subq    (x), n0
        sbbq    8(x), n1
        sbbq    16(x), n2
        sbbq    24(x), n3
        sbbq    32(x), n4
        sbbq    40(x), q

// Write back

        movq    n0, (z)
        movq    n1, 8(z)
        movq    n2, 16(z)
        movq    n3, 24(z)
        movq    n4, 32(z)
        movq    q, 40(z)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_neg_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
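The bitmask q exists purely to keep the result strictly reduced: a plain p_384 - x would map x = 0 to p_384 itself. A one-line Python model of the intended behavior (illustrative):

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def neg_p384(x):
    # x assumed reduced: 0 <= x < p_384
    return (P384 - x) % P384  # yields 0 for x == 0 rather than p_384

assert neg_p384(0) == 0
assert (neg_p384(5) + 5) % P384 == 0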
wlsfx/bnbb
4,195
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_cmul_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_384, z := (c * x) mod p_384, assuming
// x reduced
// Inputs c, x[6]; output z[6]
//
//    extern void bignum_cmul_p384(uint64_t z[static 6], uint64_t c,
//                                 const uint64_t x[static 6]);
//
// Standard x86-64 ABI: RDI = z, RSI = c, RDX = x
// Microsoft x64 ABI:   RCX = z, RDX = c, R8 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_x86_att.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p384)
        .text

#define z %rdi

// Temporarily moved here for initial multiply
#define x %rcx
// Likewise this is thrown away after initial multiply
#define m %rdx

#define a %rax
#define c %rcx

#define d0 %rsi
#define d1 %r8
#define d2 %r9
#define d3 %r10
#define d4 %r11
#define d5 %r12

// Multiplier again for second stage
#define q %rdx

#define ashort %eax
#define cshort %ecx
#define qshort %edx

S2N_BN_SYMBOL(bignum_cmul_p384):
        CFI_START
        _CET_ENDBR

#if WINDOWS_ABI
        CFI_PUSH(%rdi)
        CFI_PUSH(%rsi)
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
#endif

// We seem to need (just!) one extra register, which we need to save and restore

        CFI_PUSH(%r12)

// Shuffle inputs (since we want multiplier in %rdx)

        movq    %rdx, x
        movq    %rsi, m

// Multiply, accumulating the result as 2^384 * h + [d5;d4;d3;d2;d1;d0]
// but actually immediately producing q = h + 1, our quotient approximation,
// by adding 1 to it. Note that by hypothesis x is reduced mod p_384, so our
// product is <= (2^64 - 1) * (p_384 - 1) and hence h <= 2^64 - 2, meaning
// there is no danger this addition of 1 could wrap.

        mulxq   (x), d0, d1
        mulxq   8(x), a, d2
        addq    a, d1
        mulxq   16(x), a, d3
        adcq    a, d2
        mulxq   24(x), a, d4
        adcq    a, d3
        mulxq   32(x), a, d5
        adcq    a, d4
        mulxq   40(x), a, q
        adcq    a, d5
        adcq    $1, q

// It's easy to see -p_384 <= z - q * p_384 < p_384, so we just need to
// subtract q * p_384 and then correct if that is negative by adding p_384.
//
// Write p_384 = 2^384 - r where r = 2^128 + 2^96 - 2^32 + 1
//
// We want z - q * (2^384 - r)
//       = (2^384 * h + l) - q * (2^384 - r)
//       = 2^384 * (h - q) + (l + q * r)
//       = 2^384 * (-1) + (l + q * r)

        xorq    c, c
        movq    $0xffffffff00000001, a
        mulxq   a, a, c
        adcxq   a, d0
        adoxq   c, d1
        movl    $0x00000000ffffffff, ashort
        mulxq   a, a, c
        adcxq   a, d1
        adoxq   c, d2
        adcxq   q, d2
        movl    $0, ashort
        movl    $0, cshort
        adoxq   a, a
        adcq    a, d3
        adcq    c, d4
        adcq    c, d5
        adcq    c, c
        subq    $1, c

// The net c value is now the top word of the 7-word answer, hence will
// be -1 if we need a corrective addition, 0 otherwise, usable as a mask.
// Now use that mask for a masked addition of p_384, which again is in
// fact done by a masked subtraction of 2^384 - p_384, so that we only
// have three nonzero digits and so can avoid using another register.

        movl    $0x00000000ffffffff, qshort
        xorq    a, a
        andq    c, q
        subq    q, a
        andq    $1, c

        subq    a, d0
        movq    d0, (z)
        sbbq    q, d1
        movq    d1, 8(z)
        sbbq    c, d2
        movq    d2, 16(z)
        sbbq    $0, d3
        movq    d3, 24(z)
        sbbq    $0, d4
        movq    d4, 32(z)
        sbbq    $0, d5
        movq    d5, 40(z)

// Return

        CFI_POP(%r12)

#if WINDOWS_ABI
        CFI_POP(%rsi)
        CFI_POP(%rdi)
#endif
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
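The quotient estimate q = h + 1 used above can be stress-tested independently: for any word c and reduced x, the remainder z - q * p_384 always lands in [-p_384, p_384), so a single conditional addition of p_384 completes the reduction. A Python sketch of that bound check (illustrative, not part of the tree):

import random

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

for _ in range(1000):
    c = random.getrandbits(64)
    x = random.randrange(P384)
    z = c * x
    q = (z >> 384) + 1          # h + 1, as computed by the assembly
    r = z - q * P384
    assert -P384 <= r < P384
    assert (r + P384 if r < 0 else r) == z % P384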
wlsfx/bnbb
8,889
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_montmul_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^384) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_montmul_p384(uint64_t z[static 6], // const uint64_t x[static 6], // const uint64_t y[static 6]); // // Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y // satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in // the "usual" case x < p_384 and y < p_384). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ----------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384) .text #define z %rdi #define x %rsi // We move the y argument here so we can use %rdx for multipliers #define y %rcx // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %rcx #define w %rbx // Add %rdx * m into a register-pair (high,low) // maintaining consistent double-carrying with adcx and adox, // using %rax and %rbx as temporaries #define mulpadd(high,low,m) \ mulxq m, %rax, %rbx ; \ adcxq %rax, low ; \ adoxq %rbx, high // Core one-step Montgomery reduction macro. Takes input in // [d7;d6;d5;d4;d3;d2;d1;d0] and returns result in [d7;d6;d5;d4;d3;d2;d1], // adding to the existing contents, re-using d0 as a temporary internally // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montredc(d7,d6,d5,d4,d3,d2,d1,d0) // // This particular variant, with its mix of addition and subtraction // at the top, is not intended to maintain a coherent carry or borrow out. // It is assumed the final result would fit in [d7;d6;d5;d4;d3;d2;d1]. 
// which is always the case here as the top word is even always in {0,1} #define montredc(d7,d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rbp;%rbx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0x00000000ffffffff, %ebx ; \ mulxq %rbx, d0, %rbx ; \ adcq d0, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rbx, d2 ; \ sbbq %rbp, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ sbbq $0, %rdx ; \ addq %rdx, d6 ; \ adcq $0, d7 S2N_BN_SYMBOL(bignum_montmul_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save more registers to play with CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) // Copy y into a safe register to start with movq %rdx, y // Do row 0 computation, which is a bit different: // set up initial window [%r14,%r13,%r12,%r11,%r10,%r9,%r8] = y[0] * x // Unlike later, we only need a single carry chain movq (y), %rdx xorl %r15d, %r15d mulxq (x), %r8, %r9 mulxq 8(x), %rbx, %r10 addq %rbx, %r9 mulxq 16(x), %rbx, %r11 adcq %rbx, %r10 mulxq 24(x), %rbx, %r12 adcq %rbx, %r11 mulxq 32(x), %rbx, %r13 adcq %rbx, %r12 mulxq 40(x), %rbx, %r14 adcq %rbx, %r13 adcq %r15, %r14 // Montgomery reduce the zeroth window montredc(%r15, %r14,%r13,%r12,%r11,%r10,%r9,%r8) // Add row 1 movq 8(y), %rdx xorl %r8d, %r8d mulpadd(%r10,%r9,(x)) mulpadd(%r11,%r10, 8(x)) mulpadd(%r12,%r11,16(x)) mulpadd(%r13,%r12,24(x)) mulpadd(%r14,%r13,32(x)) adoxq %r8, %r15 mulxq 40(x), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r8, %r8 // Montgomery reduce window 1 montredc(%r8, %r15,%r14,%r13,%r12,%r11,%r10,%r9) // Add row 2 movq 16(y), %rdx xorl %r9d, %r9d mulpadd(%r11,%r10,(x)) mulpadd(%r12,%r11,8(x)) mulpadd(%r13,%r12,16(x)) mulpadd(%r14,%r13,24(x)) mulpadd(%r15,%r14,32(x)) adoxq %r9, %r8 mulxq 40(x), %rax, %rbx adcq %rax, %r15 adcq %rbx, %r8 adcq %r9, %r9 // Montgomery reduce window 2 montredc(%r9, %r8,%r15,%r14,%r13,%r12,%r11,%r10) // Add row 3 movq 24(y), %rdx xorl %r10d, %r10d mulpadd(%r12,%r11,(x)) mulpadd(%r13,%r12,8(x)) mulpadd(%r14,%r13,16(x)) mulpadd(%r15,%r14,24(x)) mulpadd(%r8,%r15,32(x)) adoxq %r10, %r9 mulxq 40(x), %rax, %rbx adcq %rax, %r8 adcq %rbx, %r9 adcq %r10, %r10 // Montgomery reduce window 3 montredc(%r10, %r9,%r8,%r15,%r14,%r13,%r12,%r11) // Add row 4 movq 32(y), %rdx xorl %r11d, %r11d mulpadd(%r13,%r12,(x)) mulpadd(%r14,%r13,8(x)) mulpadd(%r15,%r14,16(x)) mulpadd(%r8,%r15,24(x)) mulpadd(%r9,%r8,32(x)) adoxq %r11, %r10 mulxq 40(x), %rax, %rbx adcq %rax, %r9 adcq %rbx, %r10 adcq %r11, %r11 // Montgomery reduce window 4 montredc(%r11, %r10,%r9,%r8,%r15,%r14,%r13,%r12) // Add row 5 movq 40(y), %rdx xorl %r12d, %r12d mulpadd(%r14,%r13,(x)) mulpadd(%r15,%r14,8(x)) mulpadd(%r8,%r15,16(x)) mulpadd(%r9,%r8,24(x)) mulpadd(%r10,%r9,32(x)) adoxq %r12, %r11 mulxq 40(x), %rax, %rbx adcq %rax, %r10 adcq %rbx, %r11 adcq %r12, %r12 // Montgomery reduce window 5 montredc(%r12, %r11,%r10,%r9,%r8,%r15,%r14,%r13) // We now have a pre-reduced 7-word form z = [%r12; %r11;%r10;%r9;%r8;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r12; %r13;%rbp;%rdx;%rcx;%rbx;%rax] = z + (2^384 - p_384) xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d 
movq $0xffffffff00000001, %rax addq %r14, %rax movl $0x00000000ffffffff, %ebx adcq %r15, %rbx movl $0x0000000000000001, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0, %r12 // ~ZF <=> %r12 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. cmovnzq %rax, %r14 cmovnzq %rbx, %r15 cmovnzq %rcx, %r8 cmovnzq %rdx, %r9 cmovnzq %rbp, %r10 cmovnzq %r13, %r11 // Write back the result movq %r14, (z) movq %r15, 8(z) movq %r8, 16(z) movq %r9, 24(z) movq %r10, 32(z) movq %r11, 40(z) // Restore registers and return CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
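As with the squaring routine, the whole function is captured by a one-line reference model, which is also the cleanest way to state the Montgomery-form contract (a sketch, assuming Python 3.8+):

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
R = 2**384  # the Montgomery radix for 6-word operands

def montmul_p384(x, y):
    # Valid whenever x * y <= R * p_384, per the header comment
    return (x * y * pow(R, -1, P384)) % P384

# Multiplying two Montgomery-form values yields the Montgomery form
# of the product:
a, b = 123456789, 987654321
to_mont = lambda v: v * R % P384
assert montmul_p384(to_mont(a), to_mont(b)) == to_mont(a * b % P384)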
wlsfx/bnbb
95,803
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/x86_att/p384/bignum_inv_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Modular inverse modulo p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 // Input x[6]; output z[6] // // extern void bignum_inv_p384(uint64_t z[static 6],const uint64_t x[static 6]); // // If the 6-digit input x is coprime to p_384, i.e. is not divisible // by it, returns z < p_384 such that x * z == 1 (mod p_384). Note that // x does not need to be reduced modulo p_384, but the output always is. // If the input is divisible (i.e. is 0 or p_384), then there can be no // modular inverse and z = 0 is returned. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_x86_att.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_inv_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_p384) .text // Size in bytes of a 64-bit word #define N 8 // Pointer-offset pairs for temporaries on stack // The u and v variables are 6 words each as expected, but the f and g // variables are 8 words each -- they need to have at least one extra // word for a sign word, and to preserve alignment we "round up" to 8. // In fact, we currently keep an extra word in u and v as well. #define f 0(%rsp) #define g (8*N)(%rsp) #define u (16*N)(%rsp) #define v (24*N)(%rsp) #define tmp (32*N)(%rsp) #define tmp2 (33*N)(%rsp) #define i (34*N)(%rsp) #define d (35*N)(%rsp) #define mat (36*N)(%rsp) // Backup for the input pointer #define res (40*N)(%rsp) // Total size to reserve on the stack #define NSPACE 42*N // Syntactic variants to make x86_att version simpler to generate #define F 0 #define G (8*N) #define U (16*N) #define V (24*N) #define MAT (36*N) #define ff (%rsp) #define gg (8*N)(%rsp) // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro from P[6..0] to P[5..0]. // --------------------------------------------------------------------------- #define amontred(P) \ /* We only know the input is -2^444 < x < 2^444. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_384. 
*/ \ movq $0xe000000000000000, %r8 ; \ xorl %eax, %eax ; \ addq P, %r8 ; \ movq $0x000000001fffffff, %r9 ; \ leaq -1(%rax), %rax ; \ adcq N+P, %r9 ; \ movq $0xdfffffffe0000000, %r10 ; \ adcq 2*N+P, %r10 ; \ movq 3*N+P, %r11 ; \ adcq %rax, %r11 ; \ movq 4*N+P, %r12 ; \ adcq %rax, %r12 ; \ movq 5*N+P, %r13 ; \ adcq %rax, %r13 ; \ movq $0x1fffffffffffffff, %r14 ; \ adcq 6*N+P, %r14 ; \ /* Correction multiplier is %rbx = w = [d0 + (d0<<32)] mod 2^64 */ \ movq %r8, %rbx ; \ shlq $32, %rbx ; \ addq %r8, %rbx ; \ /* Construct [%rbp;%rdx;%rax;-] = (2^384 - p_384) * w */ \ /* We know lowest word will cancel so can re-use %r8 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq %r8, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w, catching carry in %rax */ \ subq %rax, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0, %r12 ; \ sbbq $0, %r13 ; \ sbbq $0, %r14 ; \ sbbq %rax, %rax ; \ addq %rbx, %r14 ; \ adcq $0, %rax ; \ /* Now if top is nonzero we subtract p_384 (almost-Montgomery) */ \ negq %rax; \ movq $0x00000000ffffffff, %rbx ; \ andq %rax, %rbx ; \ movq $0xffffffff00000000, %rcx ; \ andq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rdx ; \ andq %rax, %rdx ; \ subq %rbx, %r9 ; \ movq %r9, P ; \ sbbq %rcx, %r10 ; \ movq %r10, N+P ; \ sbbq %rdx, %r11 ; \ movq %r11, 2*N+P ; \ sbbq %rax, %r12 ; \ movq %r12, 3*N+P ; \ sbbq %rax, %r13 ; \ movq %r13, 4*N+P ; \ sbbq %rax, %r14 ; \ movq %r14, 5*N+P // Very similar to a subroutine call to the s2n-bignum word_divstep59. // But different in register usage and returning the final matrix as // // [ %r8 %r10] // [ %r12 %r14] // // and also returning the matrix still negated (which doesn't matter) #define divstep59(din,fin,gin) \ movq din, %rsi ; \ movq fin, %rdx ; \ movq gin, %rcx ; \ movq %rdx, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ xorl %ebp, %ebp ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, 
%rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, 
%rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %rdx ; \ leaq (%rcx,%rax), %rdi ; \ shlq $0x16, %rdx ; \ shlq $0x16, %rdi ; \ sarq $0x2b, %rdx ; \ sarq $0x2b, %rdi ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %rbx ; \ leaq (%rcx,%rax), %rcx ; \ sarq $0x2a, %rbx ; \ sarq $0x2a, %rcx ; \ movq %rdx, MAT(%rsp) ; \ movq %rbx, MAT+0x8(%rsp) ; \ movq %rdi, MAT+0x10(%rsp) ; \ movq %rcx, MAT+0x18(%rsp) ; \ movq fin, %r12 ; \ imulq %r12, %rdi ; \ imulq %rdx, %r12 ; \ movq gin, %r13 ; \ imulq %r13, %rbx ; \ imulq %rcx, %r13 ; \ addq %rbx, %r12 ; \ addq %rdi, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq 
$1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r10 ; \ shlq $0x16, %r8 ; \ shlq $0x16, %r10 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r10 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r15 ; \ leaq (%rcx,%rax), %r11 ; \ sarq $0x2a, %r15 ; \ sarq $0x2a, %r11 ; \ movq %r13, %rbx ; \ movq %r12, %rcx ; \ imulq %r8, %r12 ; \ imulq %r15, %rbx ; \ addq %rbx, %r12 ; \ imulq %r11, %r13 ; \ imulq %r10, %rcx ; \ addq 
%rcx, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq MAT(%rsp), %rax ; \ imulq %r8, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r15, %rdx ; \ imulq MAT+0x8(%rsp), %r8 ; \ imulq MAT+0x18(%rsp), %r15 ; \ addq %r8, %r15 ; \ leaq (%rax,%rdx), %r9 ; \ movq MAT(%rsp), %rax ; \ imulq %r10, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r11, %rdx ; \ imulq MAT+0x8(%rsp), %r10 ; \ imulq MAT+0x18(%rsp), %r11 ; \ addq %r10, %r11 ; \ leaq (%rax,%rdx), %r13 ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ 
xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r12 ; \ shlq $0x15, %r8 ; \ shlq $0x15, %r12 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r12 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r10 ; \ leaq (%rcx,%rax), %r14 ; \ sarq $0x2b, %r10 ; \ sarq $0x2b, %r14 ; \ movq %r9, %rax ; \ imulq %r8, %rax ; \ movq %r13, %rdx ; \ imulq %r10, %rdx ; \ imulq %r15, %r8 ; \ imulq %r11, %r10 ; \ addq %r8, %r10 ; \ leaq (%rax,%rdx), %r8 ; \ movq %r9, %rax ; \ imulq %r12, %rax ; \ movq %r13, %rdx ; \ imulq %r14, %rdx ; \ imulq %r15, %r12 ; \ imulq %r11, %r14 ; \ addq %r12, %r14 ; \ leaq (%rax,%rdx), %r12 S2N_BN_SYMBOL(bignum_inv_p384): CFI_START _CET_ENDBR #if WINDOWS_ABI CFI_PUSH(%rdi) CFI_PUSH(%rsi) movq %rcx, %rdi movq %rdx, %rsi #endif // Save registers and make room for temporaries CFI_PUSH(%rbx) CFI_PUSH(%rbp) CFI_PUSH(%r12) CFI_PUSH(%r13) CFI_PUSH(%r14) CFI_PUSH(%r15) CFI_DEC_RSP(NSPACE) // Save the return pointer for the end so we can overwrite %rdi later movq %rdi, res // Copy the constant p_384 into f including the 7th zero digit movl $0xffffffff, %eax movq %rax, F(%rsp) movq %rax, %rbx notq %rbx movq %rbx, F+N(%rsp) xorl %ebp, %ebp leaq -2(%rbp), %rcx movq %rcx, F+2*N(%rsp) leaq -1(%rbp), %rdx movq %rdx, F+3*N(%rsp) movq %rdx, 
F+4*N(%rsp) movq %rdx, F+5*N(%rsp) movq %rbp, F+6*N(%rsp) // Copy input but to g, reduced mod p_384 so that g <= f as assumed // in the divstep bound proof. movq (%rsi), %r8 subq %rax, %r8 movq N(%rsi), %r9 sbbq %rbx, %r9 movq 2*N(%rsi), %r10 sbbq %rcx, %r10 movq 3*N(%rsi), %r11 sbbq %rdx, %r11 movq 4*N(%rsi), %r12 sbbq %rdx, %r12 movq 5*N(%rsi), %r13 sbbq %rdx, %r13 cmovcq (%rsi), %r8 cmovcq N(%rsi), %r9 cmovcq 2*N(%rsi), %r10 cmovcq 3*N(%rsi), %r11 cmovcq 4*N(%rsi), %r12 cmovcq 5*N(%rsi), %r13 movq %r8, G(%rsp) movq %r9, G+N(%rsp) movq %r10, G+2*N(%rsp) movq %r11, G+3*N(%rsp) movq %r12, G+4*N(%rsp) movq %r13, G+5*N(%rsp) movq %rbp, G+6*N(%rsp) // Also maintain reduced < 2^384 vector [u,v] such that // [f,g] == x * 2^{5*i-75} * [u,v] (mod p_384) // starting with [p_384,x] == x * 2^{5*0-75} * [0,2^75] (mod p_384) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. xorl %eax, %eax movq %rax, U(%rsp) movq %rax, U+N(%rsp) movq %rax, U+2*N(%rsp) movq %rax, U+3*N(%rsp) movq %rax, U+4*N(%rsp) movq %rax, U+5*N(%rsp) movl $2048, %ecx movq %rax, V(%rsp) movq %rcx, V+N(%rsp) movq %rax, V+2*N(%rsp) movq %rax, V+3*N(%rsp) movq %rax, V+4*N(%rsp) movq %rax, V+5*N(%rsp) // Start of main loop. We jump into the middle so that the divstep // portion is common to the special fifteenth iteration after a uniform // first 14. movq $15, i movq $1, d jmp Lbignum_inv_p384_midloop Lbignum_inv_p384_loop: // Separate out the matrix into sign-magnitude pairs movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in temporary storage for the [u,v] part and do [f,g] first. movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, tmp movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, tmp2 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. 
// // Digit 0 of [f,g] xorl %ebx, %ebx movq F(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq F(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq G(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp // Digit 1 of [f,g] xorl %ecx, %ecx movq F+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq G+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, F(%rsp) xorl %edi, %edi movq F+N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq G+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, G(%rsp) // Digit 2 of [f,g] xorl %esi, %esi movq F+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq G+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, F+N(%rsp) xorl %ebx, %ebx movq F+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq G+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, G+N(%rsp) // Digit 3 of [f,g] xorl %ebp, %ebp movq F+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq G+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $59, %rsi, %rcx movq %rcx, F+2*N(%rsp) xorl %ecx, %ecx movq F+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbx adcq %rdx, %rcx movq G+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, G+2*N(%rsp) // Digit 4 of [f,g] xorl %edi, %edi movq F+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbp adcq %rdx, %rdi movq G+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, F+3*N(%rsp) xorl %esi, %esi movq F+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rcx adcq %rdx, %rsi movq G+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, G+3*N(%rsp) // Digits 5 and 6 of [f,g] movq F+5*N(%rsp), %rax xorq %r9, %rax movq F+6*N(%rsp), %rbx xorq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G+5*N(%rsp), %rax xorq %r11, %rax movq G+6*N(%rsp), %rdx xorq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, F+4*N(%rsp) shrdq $59, %rbx, %rdi sarq $59, %rbx movq F+5*N(%rsp), %rax movq %rdi, F+5*N(%rsp) movq F+6*N(%rsp), %rdi movq %rbx, F+6*N(%rsp) xorq %r13, %rax xorq %r13, %rdi andq %r12, %rdi negq %rdi mulq %r12 addq %rax, %rsi adcq %rdx, %rdi movq G+5*N(%rsp), %rax xorq %r15, %rax movq G+6*N(%rsp), %rdx xorq %r15, %rdx andq %r14, %rdx subq %rdx, %rdi mulq %r14 addq %rax, %rsi adcq %rdx, %rdi shrdq $59, %rsi, %rcx movq %rcx, G+4*N(%rsp) shrdq $59, %rdi, %rsi movq %rsi, G+5*N(%rsp) sarq $59, %rdi movq %rdi, G+6*N(%rsp) // Get the initial carries back from storage and do the [u,v] accumulation movq tmp, %rbx movq tmp2, %rbp // Digit 0 of [u,v] xorl %ecx, %ecx movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi 
movq %rbp, V(%rsp) // Digit 1 of [u,v] xorl %ebx, %ebx movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+N(%rsp) // Digit 2 of [u,v] xorl %ecx, %ecx movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+2*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+2*N(%rsp) // Digit 3 of [u,v] xorl %ebx, %ebx movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+3*N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+3*N(%rsp) // Digit 4 of [u,v] xorl %ecx, %ecx movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+4*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+4*N(%rsp) // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx // Preload for last use of old u digit 3 movq U+5*N(%rsp), %rax movq %rcx, U+5*N(%rsp) movq %rdx, U+6*N(%rsp) // Digits 5 and 6 of v (top is unsigned) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq V+5*N(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rsi, V+5*N(%rsp) movq %rdx, V+6*N(%rsp) // Montgomery reduction of u amontred(u) // Montgomery reduction of v amontred(v) Lbignum_inv_p384_midloop: divstep59(d,ff,gg) movq %rsi, d // Next iteration decq i jnz Lbignum_inv_p384_loop // The 15th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. movq F(%rsp), %rax movq G(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $63, %rax // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * [u,v] (mod p_384) // we want to flip the sign of u according to that of f. 
movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 // Adjust the initial value to allow for complement instead of negation movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 // Digit 0 of [u] xorl %r13d, %r13d movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 movq %r12, U(%rsp) adcq %rdx, %r13 // Digit 1 of [u] xorl %r14d, %r14d movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 movq %r13, U+N(%rsp) adcq %rdx, %r14 // Digit 2 of [u] xorl %r15d, %r15d movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+2*N(%rsp) adcq %rdx, %r15 // Digit 3 of [u] xorl %r14d, %r14d movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r15 adcq %rdx, %r14 movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r15 movq %r15, U+3*N(%rsp) adcq %rdx, %r14 // Digit 4 of [u] xorl %r15d, %r15d movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+4*N(%rsp) adcq %rdx, %r15 // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 movq %r15, U+5*N(%rsp) adcq %rdx, %r9 movq %r9, U+6*N(%rsp) // Montgomery reduce u amontred(u) // Perform final strict reduction mod p_384 and copy to output movl $0xffffffff, %eax movq %rax, %rbx notq %rbx xorl %ebp, %ebp leaq -2(%rbp), %rcx leaq -1(%rbp), %rdx movq U(%rsp), %r8 subq %rax, %r8 movq U+N(%rsp), %r9 sbbq %rbx, %r9 movq U+2*N(%rsp), %r10 sbbq %rcx, %r10 movq U+3*N(%rsp), %r11 sbbq %rdx, %r11 movq U+4*N(%rsp), %r12 sbbq %rdx, %r12 movq U+5*N(%rsp), %r13 sbbq %rdx, %r13 cmovcq U(%rsp), %r8 cmovcq U+N(%rsp), %r9 cmovcq U+2*N(%rsp), %r10 cmovcq U+3*N(%rsp), %r11 cmovcq U+4*N(%rsp), %r12 cmovcq U+5*N(%rsp), %r13 movq res, %rdi movq %r8, (%rdi) movq %r9, N(%rdi) movq %r10, 2*N(%rdi) movq %r11, 3*N(%rdi) movq %r12, 4*N(%rdi) movq %r13, 5*N(%rdi) // Restore stack and registers CFI_INC_RSP(NSPACE) CFI_POP(%r15) CFI_POP(%r14) CFI_POP(%r13) CFI_POP(%r12) CFI_POP(%rbp) CFI_POP(%rbx) #if WINDOWS_ABI CFI_POP(%rsi) CFI_POP(%rdi) #endif CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_inv_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
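Two ends of this file can be checked against small independent models. First, the seven constants added at the top of amontred are exactly the 64-bit words of 2^61 * p_384, which is what shifts the signed input -2^444 < x < 2^444 into unsigned range before the word-level reduction; second, the overall contract is plain modular inversion, with 0 returned for non-invertible input. A Python sketch of both (illustrative; pow(x, -1, p) needs Python 3.8+):

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

# Words of 2^61 * p_384 as added by amontred, little-endian order:
amontred_words = [0xe000000000000000, 0x000000001fffffff,
                  0xdfffffffe0000000, 0xffffffffffffffff,
                  0xffffffffffffffff, 0xffffffffffffffff,
                  0x1fffffffffffffff]
assert sum(w << (64 * i) for i, w in enumerate(amontred_words)) == P384 << 61

def inv_p384(x):
    # z < p_384 with x * z == 1 (mod p_384), or 0 if x == 0 (mod p_384);
    # p_384 is prime, so every nonzero residue is invertible
    x %= P384
    return pow(x, -1, P384) if x else 0

assert inv_p384(0) == 0
assert inv_p384(2) * 2 % P384 == 1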