| repo_id | size | file_path |
|---|---|---|
| chairq/First-choice | 25,836 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/vpaes-armv8-win64.S |

content:

// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <ring-core/arm_arch.h>
.section .rodata
.align 7 // totally strategic alignment
_vpaes_consts:
Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Key schedule constants
//
Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
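// (the bytes above spell "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)")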
.align 2
.align 6
.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.def _vpaes_encrypt_preheat
.type 32
.endef
.align 4
_vpaes_encrypt_preheat:
adrp x10, Lk_inv
add x10, x10, :lo12:Lk_inv
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2
ret
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
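## Each tbl below is a 16-entry, 4-bit table lookup; chaining these lookups
## evaluates the AES S-box via GF(2^4) arithmetic (Hamburg, "Accelerating AES
## with Vector Permute Instructions", CHES 2009) with no data-dependent memory
## accesses, so the round function runs in constant time.
##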
.def _vpaes_encrypt_core
.type 32
.endef
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward+16
add x11, x11, :lo12:Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b Lenc_entry
.align 4
Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
Lenc_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
.globl vpaes_encrypt
.def vpaes_encrypt
.type 32
.endef
.align 4
vpaes_encrypt:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v7.16b}, [x0]
bl _vpaes_encrypt_preheat
bl _vpaes_encrypt_core
st1 {v0.16b}, [x1]
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.def _vpaes_encrypt_2x
.type 32
.endef
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward+16
add x11, x11, :lo12:Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b Lenc_2x_entry
.align 4
Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
########################################################
## ##
## AES key schedule ##
## ##
########################################################
.def _vpaes_key_preheat
.type 32
.endef
.align 4
_vpaes_key_preheat:
adrp x10, Lk_inv
add x10, x10, :lo12:Lk_inv
movi v16.16b, #0x5b // Lk_s63
adrp x11, Lk_sb1
add x11, x11, :lo12:Lk_sb1
movi v17.16b, #0x0f // Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt
adrp x10, Lk_dksd
add x10, x10, :lo12:Lk_dksd
ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1
adrp x11, Lk_mc_forward
add x11, x11, :lo12:Lk_mc_forward
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9
ld1 {v8.2d}, [x10] // Lk_rcon
ld1 {v9.2d}, [x11] // Lk_mc_forward[0]
ret
.def _vpaes_schedule_core
.type 32
.endef
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, Lk_sr // lea Lk_sr(%rip),%r10
add x10, x10, :lo12:Lk_sr
add x8, x8, x10
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
cmp w1, #192 // cmp $192, %esi
b.hi Lschedule_256
b.eq Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
Lschedule_128:
mov x0, #10 // mov $10, %esi
Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
.align 4
Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align 4
Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, Lk_deskew // lea Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, :lo12:Lk_deskew
cbnz w3, Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, Lk_opt // lea Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, :lo12:Lk_opt
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.def _vpaes_schedule_192_smear
.type 32
.endef
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
.def _vpaes_schedule_round
.type 32
.endef
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.def _vpaes_schedule_transform
.type 32
.endef
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.def _vpaes_schedule_mangle
.type 32
.endef
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
.globl vpaes_set_encrypt_key
.def vpaes_set_encrypt_key
.type 32
.endef
.align 4
vpaes_set_encrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // add $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
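// For nbits = 128/192/256 this stores 9/11/13: _vpaes_encrypt_core runs that
// many Lenc_loop iterations, with the final round applied after Lenc_entry.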
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl vpaes_ctr32_encrypt_blocks
.def vpaes_ctr32_encrypt_blocks
.type 32
.endef
.align 4
vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
b.ls Lctr32_done
Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi Lctr32_loop
Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
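
A minimal usage sketch (assuming `ring` 0.17 as a Cargo dependency) of the public API behind the file above: `ring`'s AES-GCM AEAD drives an AES-CTR core, and on AArch64 builds without hardware AES support constant-time vector-permutation paths like `vpaes_ctr32_encrypt_blocks` serve as the fallback. The zeroed key and nonce here are placeholders, not a recommendation.

```rust
use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, AES_128_GCM};

fn main() -> Result<(), ring::error::Unspecified> {
    // Demo key; real callers must use a randomly generated key.
    let key = LessSafeKey::new(UnboundKey::new(&AES_128_GCM, &[0u8; 16])?);
    let mut in_out = b"hello, vpaes".to_vec();
    // Demo nonce; nonces must be unique per key in practice.
    key.seal_in_place_append_tag(
        Nonce::assume_unique_for_key([0u8; 12]),
        Aad::empty(),
        &mut in_out,
    )?;
    // `in_out` now holds the ciphertext followed by the 16-byte GCM tag.
    Ok(())
}
```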

| repo_id | size | file_path |
|---|---|---|
| chairq/First-choice | 69,197 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/sha256-x86_64-macosx.S |

content:

// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _sha256_block_data_order
.private_extern _sha256_block_data_order
.p2align 4
_sha256_block_data_order:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%r11
movl 0(%r11),%r9d
movl 4(%r11),%r10d
movl 8(%r11),%r11d
testl $536870912,%r11d
jnz L$shaext_shortcut
andl $1073741824,%r9d
andl $268435968,%r10d
orl %r9d,%r10d
cmpl $1342177792,%r10d
je L$avx_shortcut
testl $512,%r10d
jnz L$ssse3_shortcut
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp L$loop
.p2align 4
L$loop:
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp L$rounds_16_xx
.p2align 4
L$rounds_16_xx:
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
cmpb $0,3(%rbp)
jnz L$rounds_16_xx
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop
movq 88(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue:
ret
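
For reference, the rotate/xor/shift chains in each round above implement the FIPS 180-4 SHA-256 round functions, with each Σ computed through fused rotations (⋙ is a 32-bit rotate right, ≫ a logical shift):

```latex
\begin{aligned}
\Sigma_0(a) &= (a \ggg 2)\oplus(a \ggg 13)\oplus(a \ggg 22)
            = \bigl((((a \ggg 9)\oplus a) \ggg 11)\oplus a\bigr) \ggg 2\\
\Sigma_1(e) &= (e \ggg 6)\oplus(e \ggg 11)\oplus(e \ggg 25)
            = \bigl((((e \ggg 14)\oplus e) \ggg 5)\oplus e\bigr) \ggg 6\\
\mathrm{Ch}(e,f,g) &= (e \wedge f)\oplus(\lnot e \wedge g), \qquad
\mathrm{Maj}(a,b,c) = (a \wedge b)\oplus(a \wedge c)\oplus(b \wedge c)\\
\sigma_0(x) &= (x \ggg 7)\oplus(x \ggg 18)\oplus(x \gg 3), \qquad
\sigma_1(x) = (x \ggg 17)\oplus(x \ggg 19)\oplus(x \gg 10)
\end{aligned}
```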
.section __DATA,__const
.p2align 6
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
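# (the bytes above spell "SHA256 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>")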
.text
.p2align 6
sha256_block_data_order_shaext:
L$shaext_shortcut:
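# The .byte sequences in this routine hand-encode SHA-NI and SSSE3
# instructions for assemblers that cannot emit them directly:
#   15,56,203,...    = sha256rnds2      15,56,204,...   = sha256msg1
#   15,56,205,...    = sha256msg2       102,15,56,0,... = pshufb
#   102,15,58,15,... = palignr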
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa 512-128(%rcx),%xmm7
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp L$oop_shaext
.p2align 4
L$oop_shaext:
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 102,15,56,0,231
movdqa %xmm2,%xmm10
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 102,15,56,0,239
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 102,15,56,0,247
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx
nop
.byte 15,56,203,202
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz L$oop_shaext
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.p2align 6
sha256_block_data_order_ssse3:
L$ssse3_shortcut:
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue_ssse3:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp L$loop_ssse3
.p2align 4
L$loop_ssse3:
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp L$ssse3_00_47
.p2align 4
L$ssse3_00_47:
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4 // palignr $4,%xmm3,%xmm7
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4 // palignr $4,%xmm2,%xmm4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4 // palignr $4,%xmm0,%xmm7
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4 // palignr $4,%xmm3,%xmm4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4 // palignr $4,%xmm1,%xmm7
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne L$ssse3_00_47
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop_ssse3
movq 88(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_ssse3:
ret
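// AVX variant of the SHA-256 block function: same round structure as the
// SSSE3 path above, but using three-operand AVX forms plus two auxiliary
// shuffle masks (%xmm8/%xmm9, loaded from K256+512+32 and K256+512+64)
// that presumably position the sigma1 results within the message schedule.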
.p2align 6
sha256_block_data_order_avx:
L$avx_shortcut:
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue_avx:
vzeroupper
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp L$loop_avx
.p2align 4
L$loop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp L$avx_00_47
.p2align 4
L$avx_00_47:
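// Rounds 0..47 of the AVX path: vpalignr extracts the W[i-15] and W[i-7]
// windows and the shift/xor mix computes sigma0/sigma1, interleaved with
// the scalar round function.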
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne L$avx_00_47
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop_avx
movq 88(%rsp),%rsi
vzeroupper
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_avx:
ret
#endif
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.extern OPENSSL_ia32cap_P
.hidden OPENSSL_ia32cap_P
.globl sha512_block_data_order
.hidden sha512_block_data_order
.type sha512_block_data_order,@function
.align 16
sha512_block_data_order:
.cfi_startproc
_CET_ENDBR
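// Dispatch on OPENSSL_ia32cap_P: take the AVX path only when the "Intel
// CPU" bit (dword 0, bit 30) and the SSSE3 and AVX bits (dword 1, bits 9
// and 28) are all set; otherwise fall through to the integer code below.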
leaq OPENSSL_ia32cap_P(%rip),%r11
movl 0(%r11),%r9d
movl 4(%r11),%r10d
movl 8(%r11),%r11d
andl $1073741824,%r9d
andl $268435968,%r10d
orl %r9d,%r10d
cmpl $1342177792,%r10d
je .Lavx_shortcut
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $128+32,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
.Lprologue:
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp .Lloop
.align 16
.Lloop:
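// One 128-byte block per iteration. The 16 unrolled rounds below load and
// byte-swap the message words inline; %rbp walks the K512 table, advancing
// 8 then 24 bytes on alternate rounds (16 bytes per round on average,
// since each constant in the table below is stored twice).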
movq %rbx,%rdi
leaq K512(%rip),%rbp
xorq %rcx,%rdi
movq 0(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 8(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 16(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 24(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 32(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 40(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 48(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 56(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
addq %r14,%rax
movq 64(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 72(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 80(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 88(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 96(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 104(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 112(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 120(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
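// Rounds 16..79: compute W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15])
// + W[i-16] from the 16-entry message window on the stack, then apply the
// round function. The cmpb $0,7(%rbp) at the bottom exits once %rbp reaches
// the zero high byte of the shuffle mask that ends the K512 table.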
movq 8(%rsp),%r13
movq 112(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 72(%rsp),%r12
addq 0(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 16(%rsp),%r13
movq 120(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 80(%rsp),%r12
addq 8(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 24(%rsp),%r13
movq 0(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 88(%rsp),%r12
addq 16(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 32(%rsp),%r13
movq 8(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 96(%rsp),%r12
addq 24(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 40(%rsp),%r13
movq 16(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 104(%rsp),%r12
addq 32(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 48(%rsp),%r13
movq 24(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 112(%rsp),%r12
addq 40(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 56(%rsp),%r13
movq 32(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 120(%rsp),%r12
addq 48(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 64(%rsp),%r13
movq 40(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 0(%rsp),%r12
addq 56(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
movq 72(%rsp),%r13
movq 48(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 8(%rsp),%r12
addq 64(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 80(%rsp),%r13
movq 56(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 16(%rsp),%r12
addq 72(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 88(%rsp),%r13
movq 64(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 24(%rsp),%r12
addq 80(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 96(%rsp),%r13
movq 72(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 32(%rsp),%r12
addq 88(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 104(%rsp),%r13
movq 80(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 40(%rsp),%r12
addq 96(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 112(%rsp),%r13
movq 88(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 48(%rsp),%r12
addq 104(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 120(%rsp),%r13
movq 96(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 56(%rsp),%r12
addq 112(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 0(%rsp),%r13
movq 104(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 64(%rsp),%r12
addq 120(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
cmpb $0,7(%rbp)
jnz .Lrounds_16_xx
movq 128+0(%rsp),%rdi
addq %r14,%rax
leaq 128(%rsi),%rsi
addq 0(%rdi),%rax
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb .Lloop
movq 152(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha512_block_data_order,.-sha512_block_data_order
.section .rodata
.align 64
.type K512,@object
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
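// ASCII: "SHA512 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>"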
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
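// sha512_block_data_order_avx(%rdi = uint64_t state[8],
//                             %rsi = input, %rdx = number of 128-byte blocks)
// AVX implementation; same contract as the scalar routine above.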
.type sha512_block_data_order_avx,@function
.align 64
sha512_block_data_order_avx:
.cfi_startproc
.Lavx_shortcut:
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $160,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa K512+1280(%rip),%xmm11
vmovdqu 0(%rsi),%xmm0
leaq K512+128(%rip),%rbp
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vpshufb %xmm11,%xmm0,%xmm0
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm11,%xmm1,%xmm1
vmovdqu 64(%rsi),%xmm4
vpshufb %xmm11,%xmm2,%xmm2
vmovdqu 80(%rsi),%xmm5
vpshufb %xmm11,%xmm3,%xmm3
vmovdqu 96(%rsi),%xmm6
vpshufb %xmm11,%xmm4,%xmm4
vmovdqu 112(%rsi),%xmm7
vpshufb %xmm11,%xmm5,%xmm5
vpaddq -128(%rbp),%xmm0,%xmm8
vpshufb %xmm11,%xmm6,%xmm6
vpaddq -96(%rbp),%xmm1,%xmm9
vpshufb %xmm11,%xmm7,%xmm7
vpaddq -64(%rbp),%xmm2,%xmm10
vpaddq -32(%rbp),%xmm3,%xmm11
vmovdqa %xmm8,0(%rsp)
vpaddq 0(%rbp),%xmm4,%xmm8
vmovdqa %xmm9,16(%rsp)
vpaddq 32(%rbp),%xmm5,%xmm9
vmovdqa %xmm10,32(%rsp)
vpaddq 64(%rbp),%xmm6,%xmm10
vmovdqa %xmm11,48(%rsp)
vpaddq 96(%rbp),%xmm7,%xmm11
vmovdqa %xmm8,64(%rsp)
movq %rax,%r14
vmovdqa %xmm9,80(%rsp)
movq %rbx,%rdi
vmovdqa %xmm10,96(%rsp)
xorq %rcx,%rdi
vmovdqa %xmm11,112(%rsp)
movq %r8,%r13
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
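// Sixteen rounds per iteration: the vpalignr/vpsrlq/vpsllq/vpxor
// sequences compute the sigma0/sigma1 message-schedule updates on four
// xmm registers of W values, interleaved with the scalar round logic,
// and each updated W[t..t+1]+K[t..t+1] pair is parked on the stack.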
addq $256,%rbp
vpalignr $8,%xmm0,%xmm1,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm4,%xmm5,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm0,%xmm0
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 0(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm7,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm7,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm0,%xmm0
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm7,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 8(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm0,%xmm0
xorq %r11,%r14
addq %r13,%r10
vpaddq -128(%rbp),%xmm0,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,0(%rsp)
vpalignr $8,%xmm1,%xmm2,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm5,%xmm6,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm1,%xmm1
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 16(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm0,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm0,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm1,%xmm1
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm0,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 24(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm1,%xmm1
xorq %r9,%r14
addq %r13,%r8
vpaddq -96(%rbp),%xmm1,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,16(%rsp)
vpalignr $8,%xmm2,%xmm3,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm6,%xmm7,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm2,%xmm2
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 32(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm1,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm1,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm2,%xmm2
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm1,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 40(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm2,%xmm2
xorq %rdx,%r14
addq %r13,%rcx
vpaddq -64(%rbp),%xmm2,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,32(%rsp)
vpalignr $8,%xmm3,%xmm4,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm7,%xmm0,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm3,%xmm3
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 48(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm2,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm2,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm3,%xmm3
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm2,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 56(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm3,%xmm3
xorq %rbx,%r14
addq %r13,%rax
vpaddq -32(%rbp),%xmm3,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,48(%rsp)
vpalignr $8,%xmm4,%xmm5,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm0,%xmm1,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm4,%xmm4
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 64(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm3,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm3,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm4,%xmm4
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm3,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 72(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm4,%xmm4
xorq %r11,%r14
addq %r13,%r10
vpaddq 0(%rbp),%xmm4,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,64(%rsp)
vpalignr $8,%xmm5,%xmm6,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm1,%xmm2,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm5,%xmm5
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 80(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm4,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm4,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm5,%xmm5
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm4,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 88(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm5,%xmm5
xorq %r9,%r14
addq %r13,%r8
vpaddq 32(%rbp),%xmm5,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,80(%rsp)
vpalignr $8,%xmm6,%xmm7,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm2,%xmm3,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm6,%xmm6
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 96(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm5,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm5,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm6,%xmm6
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm5,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 104(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm6,%xmm6
xorq %rdx,%r14
addq %r13,%rcx
vpaddq 64(%rbp),%xmm6,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,96(%rsp)
vpalignr $8,%xmm7,%xmm0,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm3,%xmm4,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm7,%xmm7
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 112(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm6,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm6,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm7,%xmm7
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm6,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 120(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm7,%xmm7
xorq %rbx,%r14
addq %r13,%rax
vpaddq 96(%rbp),%xmm7,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,112(%rsp)
cmpb $0,135(%rbp)
jne .Lavx_00_47
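// Schedule exhausted (byte 135(%rbp) is nonzero for every round-constant
// pair and zero once %rbp reaches the byte-order mask): run the last 16
// rounds on the precomputed W+K values left on the stack.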
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 0(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 8(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 16(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 24(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 32(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 40(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 48(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 56(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 64(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 72(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 80(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 88(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 96(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 104(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 112(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 120(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
movq 128+0(%rsp),%rdi
movq %r14,%rax
addq 0(%rdi),%rax
leaq 128(%rsi),%rsi
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb .Lloop_avx
movq 152(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha512_block_data_order_avx,.-sha512_block_data_order_avx
#endif
// ----------------------------------------------------------------------
// .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/x86_64-mont5-macosx.S
// ----------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _bn_mul_mont_gather5
.private_extern _bn_mul_mont_gather5
.p2align 6
_bn_mul_mont_gather5:
_CET_ENDBR
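// bn_mul_mont_gather5: Montgomery multiplication where the multiplicand
// is fetched from a 32-entry table with a constant-time gather, so the
// memory access pattern is independent of the (secret) table index taken
// from the stack argument. %rdi=rp, %rsi=ap, %rdx=table, %rcx=np,
// %r8=&n0, %r9d=num limbs (argument layout as used below).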
movl %r9d,%r9d
movq %rsp,%rax
testl $7,%r9d
jnz L$mul_enter
leaq _OPENSSL_ia32cap_P(%rip),%r11
movl 8(%r11),%r11d
jmp L$mul4x_enter
.p2align 4
L$mul_enter:
movd 8(%rsp),%xmm5
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
negq %r9
movq %rsp,%r11
leaq -280(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
jmp L$mul_page_walk_done
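// Touch each 4KiB page of the newly reserved stack area so the guard
// page is grown one page at a time.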
L$mul_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
L$mul_page_walk_done:
leaq L$inc(%rip),%r10
movq %rax,8(%rsp,%r9,8)
L$mul_body:
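// Build a one-hot mask from the table index (broadcast in %xmm5) with an
// incrementing counter and pcmpeqd, then AND/OR every table entry against
// the mask: the selected b value lands in %xmm0 without an indexed load.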
leaq 128(%rdx),%r12
movdqa 0(%r10),%xmm0
movdqa 16(%r10),%xmm1
leaq 24-112(%rsp,%r9,8),%r10
andq $-16,%r10
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
.byte 0x67
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
.byte 0x67
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%r12),%xmm0
pand 80(%r12),%xmm1
pand 96(%r12),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%r12),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%r12),%xmm4
movdqa -112(%r12),%xmm5
movdqa -96(%r12),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%r12),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%r12),%xmm4
movdqa -48(%r12),%xmm5
movdqa -32(%r12),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%r12),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%r12),%xmm4
movdqa 16(%r12),%xmm5
movdqa 32(%r12),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%r12),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
por %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq (%r8),%r8
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$1st_enter
.p2align 4
L$1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne L$1st
addq %rax,%r13
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r9,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp L$outer
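// One outer iteration per limb of b: gather b[i] (constant time, as
// above), then accumulate a[]*b[i] + m*np[] into tp[], where
// m = (tp[0] + a[0]*b[i]) * n0 mod 2^64.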
.p2align 4
L$outer:
leaq 24+128(%rsp,%r9,8),%rdx
andq $-16,%rdx
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r12),%xmm0
movdqa -112(%r12),%xmm1
movdqa -96(%r12),%xmm2
movdqa -80(%r12),%xmm3
pand -128(%rdx),%xmm0
pand -112(%rdx),%xmm1
por %xmm0,%xmm4
pand -96(%rdx),%xmm2
por %xmm1,%xmm5
pand -80(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r12),%xmm0
movdqa -48(%r12),%xmm1
movdqa -32(%r12),%xmm2
movdqa -16(%r12),%xmm3
pand -64(%rdx),%xmm0
pand -48(%rdx),%xmm1
por %xmm0,%xmm4
pand -32(%rdx),%xmm2
por %xmm1,%xmm5
pand -16(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r12),%xmm0
movdqa 16(%r12),%xmm1
movdqa 32(%r12),%xmm2
movdqa 48(%r12),%xmm3
pand 0(%rdx),%xmm0
pand 16(%rdx),%xmm1
por %xmm0,%xmm4
pand 32(%rdx),%xmm2
por %xmm1,%xmm5
pand 48(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r12),%xmm0
movdqa 80(%r12),%xmm1
movdqa 96(%r12),%xmm2
movdqa 112(%r12),%xmm3
pand 64(%rdx),%xmm0
pand 80(%rdx),%xmm1
por %xmm0,%xmm4
pand 96(%rdx),%xmm2
por %xmm1,%xmm5
pand 112(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%r12),%r12
movq (%rsi),%rax
.byte 102,72,15,126,195
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$inner_enter
.p2align 4
L$inner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$inner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne L$inner
addq %rax,%r13
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r9,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r9,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb L$outer
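// Final reduction: subtract np from tp with borrow, then use the final
// borrow to form complementary masks and select between tp and tp-np
// branch-free; the temporary area is overwritten as it is consumed.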
xorq %r14,%r14
movq (%rsp),%rax
leaq (%rsp),%rsi
movq %r9,%r15
jmp L$sub
.p2align 4
L$sub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsi,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz L$sub
sbbq $0,%rax
movq $-1,%rbx
xorq %rax,%rbx
xorq %r14,%r14
movq %r9,%r15
L$copy:
movq (%rdi,%r14,8),%rcx
movq (%rsp,%r14,8),%rdx
andq %rbx,%rcx
andq %rax,%rdx
movq %r14,(%rsp,%r14,8)
orq %rcx,%rdx
movq %rdx,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz L$copy
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul_epilogue:
ret
.p2align 5
bn_mul4x_mont_gather5:
.byte 0x67
movq %rsp,%rax
L$mul4x_enter:
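// Bits 0x80108 of OPENSSL_ia32cap_P[2] are the BMI1, BMI2 and ADX feature
// bits; only when all three are present is the MULX/ADX path taken.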
andl $0x80108,%r11d
cmpl $0x80108,%r11d
je L$mulx4x_enter
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mul4x_prologue:
.byte 0x67
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$mul4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$mul4xsp_done
.p2align 5
L$mul4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$mul4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mul4x_page_walk
jmp L$mul4x_page_walk_done
L$mul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mul4x_page_walk
L$mul4x_page_walk_done:
negq %r9
movq %rax,40(%rsp)
L$mul4x_body:
call mul4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul4x_epilogue:
ret
.p2align 5
mul4x_internal:
shlq $5,%r9
movd 8(%rax),%xmm5
leaq L$inc(%rip),%rax
leaq 128(%rdx,%r9,1),%r13
shrq $5,%r9
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r9,1),%r10
leaq 128(%rdx),%r12
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67,0x67
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
.byte 0x67
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
.byte 0x67
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%r12),%xmm0
pand 80(%r12),%xmm1
pand 96(%r12),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%r12),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%r12),%xmm4
movdqa -112(%r12),%xmm5
movdqa -96(%r12),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%r12),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%r12),%xmm4
movdqa -48(%r12),%xmm5
movdqa -32(%r12),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%r12),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%r12),%xmm4
movdqa 16(%r12),%xmm5
movdqa 32(%r12),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%r12),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
por %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq %r13,16+8(%rsp)
movq %rdi,56+8(%rsp)
movq (%r8),%r8
movq (%rsi),%rax
leaq (%rsi,%r9,1),%rsi
negq %r9
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
leaq 64+8(%rsp),%r14
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
jmp L$1st4x
.p2align 5
L$1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
addq $32,%r15
jnz L$1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%r14)
jmp L$outer4x
.p2align 5
L$outer4x:
leaq 16+128(%r14),%rdx
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r12),%xmm0
movdqa -112(%r12),%xmm1
movdqa -96(%r12),%xmm2
movdqa -80(%r12),%xmm3
pand -128(%rdx),%xmm0
pand -112(%rdx),%xmm1
por %xmm0,%xmm4
pand -96(%rdx),%xmm2
por %xmm1,%xmm5
pand -80(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r12),%xmm0
movdqa -48(%r12),%xmm1
movdqa -32(%r12),%xmm2
movdqa -16(%r12),%xmm3
pand -64(%rdx),%xmm0
pand -48(%rdx),%xmm1
por %xmm0,%xmm4
pand -32(%rdx),%xmm2
por %xmm1,%xmm5
pand -16(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r12),%xmm0
movdqa 16(%r12),%xmm1
movdqa 32(%r12),%xmm2
movdqa 48(%r12),%xmm3
pand 0(%rdx),%xmm0
pand 16(%rdx),%xmm1
por %xmm0,%xmm4
pand 32(%rdx),%xmm2
por %xmm1,%xmm5
pand 48(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r12),%xmm0
movdqa 80(%r12),%xmm1
movdqa 96(%r12),%xmm2
movdqa 112(%r12),%xmm3
pand 64(%rdx),%xmm0
pand 80(%rdx),%xmm1
por %xmm0,%xmm4
pand 96(%rdx),%xmm2
por %xmm1,%xmm5
pand 112(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq (%r14,%r9,1),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
movq %rdi,(%r14)
leaq (%r14,%r9,1),%r14
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdx,%r13
jmp L$inner4x
.p2align 5
L$inner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
addq (%r14),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%r13
addq $32,%r15
jnz L$inner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq %rbp,%rax
movq -8(%rcx),%rbp
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
movq %rdi,-16(%r14)
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%r14),%r13
adcq $0,%rdi
movq %r13,-8(%r14)
cmpq 16+8(%rsp),%r12
jb L$outer4x
xorq %rax,%rax
subq %r13,%rbp
adcq %r15,%r15
orq %r15,%rdi
subq %rdi,%rax
leaq (%r14,%r9,1),%rbx
movq (%rcx),%r12
leaq (%rcx),%rbp
movq %r9,%rcx
sarq $3+2,%rcx
movq 56+8(%rsp),%rdi
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqr4x_sub_entry
.globl _bn_power5
.private_extern _bn_power5
.p2align 5
_bn_power5:
_CET_ENDBR
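// bn_power5: five back-to-back Montgomery squarings followed by one
// Montgomery multiplication by a gathered table entry -- effectively
// out = in^32 * tbl[power], everything in Montgomery form.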
movq %rsp,%rax
leaq _OPENSSL_ia32cap_P(%rip),%r11
movl 8(%r11),%r11d
andl $0x80108,%r11d
cmpl $0x80108,%r11d
je L$powerx5_enter
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$power5_prologue:
shll $3,%r9d
leal (%r9,%r9,2),%r10d
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$pwr_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$pwr_sp_done
.p2align 5
L$pwr_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$pwr_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwr_page_walk
jmp L$pwr_page_walk_done
L$pwr_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwr_page_walk
L$pwr_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$power5_body:
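// Hand-encoded movq r64 -> xmm below: stash %rdi, %rcx, %r10 and %rdx in
// %xmm1-%xmm4 so they survive the internal calls.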
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq %rsi,%rdi
movq 40(%rsp),%rax
leaq 32(%rsp),%r8
call mul4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$power5_epilogue:
ret
.globl _bn_sqr8x_internal
.private_extern _bn_sqr8x_internal
.p2align 5
_bn_sqr8x_internal:
__bn_sqr8x_internal:
_CET_ENDBR
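// Schoolbook squaring: accumulate the a[i]*a[j] (i < j) cross products
// first; the shift-and-add pass below doubles them and folds in the
// diagonal a[i]^2 terms, after which __bn_sqr8x_reduction performs the
// Montgomery reduction.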
leaq 32(%r10),%rbp
leaq (%rsi,%r9,1),%rsi
movq %r9,%rcx
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
movq %r10,-24(%rdi,%rbp,1)
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
movq %r11,-16(%rdi,%rbp,1)
movq %rdx,%r10
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
movq %rax,%r12
movq %rbx,%rax
movq %rdx,%r13
leaq (%rbp),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
jmp L$sqr4x_1st
.p2align 5
L$sqr4x_1st:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq 16(%rsi,%rcx,1),%rbx
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %r10,8(%rdi,%rcx,1)
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 24(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,16(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
leaq 32(%rcx),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne L$sqr4x_1st
mulq %r15
addq %rax,%r13
leaq 16(%rbp),%rbp
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
jmp L$sqr4x_outer
.p2align 5
L$sqr4x_outer:
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq -24(%rdi,%rbp,1),%r10
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
movq %r10,-24(%rdi,%rbp,1)
movq %rdx,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
addq -16(%rdi,%rbp,1),%r11
movq %rdx,%r10
adcq $0,%r10
movq %r11,-16(%rdi,%rbp,1)
xorq %r12,%r12
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
addq %rax,%r12
movq %rbx,%rax
adcq $0,%rdx
addq -8(%rdi,%rbp,1),%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rbp,1)
leaq (%rbp),%rcx
jmp L$sqr4x_inner
.p2align 5
L$sqr4x_inner:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
addq (%rdi,%rcx,1),%r13
adcq $0,%r12
.byte 0x67
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %r11,(%rdi,%rcx,1)
movq %rbx,%rax
movq %rdx,%r13
adcq $0,%r13
addq 8(%rdi,%rcx,1),%r12
leaq 16(%rcx),%rcx
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne L$sqr4x_inner
.byte 0x67
mulq %r15
addq %rax,%r13
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
addq $16,%rbp
jnz L$sqr4x_outer
movq -32(%rsi),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi),%rbx
movq %rax,%r15
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq %r10,-24(%rdi)
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
movq -8(%rsi),%rbx
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,-16(%rdi)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi)
mulq %r15
addq %rax,%r13
movq -16(%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
mulq %rbx
addq $16,%rbp
xorq %r14,%r14
subq %r9,%rbp
xorq %r15,%r15
addq %r12,%rax
adcq $0,%rdx
movq %rax,8(%rdi)
movq %rdx,16(%rdi)
movq %r15,24(%rdi)
movq -16(%rsi,%rbp,1),%rax
leaq 48+8(%rsp),%rdi
xorq %r10,%r10
movq 8(%rdi),%r11
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
leaq 16(%rbp),%rbp
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
jmp L$sqr4x_shift_n_add
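// Shift the accumulated cross products left by one bit (lea with scale 2
// plus the saved top bit) and add the diagonal squares a[i]^2 produced
// by mulq %rax.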
.p2align 5
L$sqr4x_shift_n_add:
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 0(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 8(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,-16(%rdi)
adcq %rdx,%r8
leaq (%r14,%r10,2),%r12
movq %r8,-8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq 8(%rsi,%rbp,1),%rax
movq %r12,0(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 16(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
addq $32,%rbp
jnz L$sqr4x_shift_n_add
leaq (%r14,%r10,2),%r12
.byte 0x67
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
mulq %rax
negq %r15
adcq %rax,%rbx
adcq %rdx,%r8
movq %rbx,-16(%rdi)
movq %r8,-8(%rdi)
.byte 102,72,15,126,213
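// (The .byte sequence above is a hand-encoded movq %xmm2,%rbp, restoring
// the modulus pointer.) Montgomery-reduce the double-width square, eight
// limbs per pass.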
__bn_sqr8x_reduction:
xorq %rax,%rax
leaq (%r9,%rbp,1),%rcx
leaq 48+8(%rsp,%r9,2),%rdx
movq %rcx,0+8(%rsp)
leaq 48+8(%rsp,%r9,1),%rdi
movq %rdx,8+8(%rsp)
negq %r9
jmp L$8x_reduction_loop
.p2align 5
L$8x_reduction_loop:
leaq (%rdi,%r9,1),%rdi
.byte 0x66
movq 0(%rdi),%rbx
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,(%rdx)
leaq 64(%rdi),%rdi
.byte 0x67
movq %rbx,%r8
imulq 32+8(%rsp),%rbx
movq 0(%rbp),%rax
movl $8,%ecx
jmp L$8x_reduce
.p2align 5
L$8x_reduce:
mulq %rbx
movq 8(%rbp),%rax
negq %r8
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
movq %rbx,48-8+8(%rsp,%rcx,8)
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq 32+8(%rsp),%rsi
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
imulq %r8,%rsi
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq %rsi,%rbx
addq %rax,%r15
movq 0(%rbp),%rax
adcq $0,%rdx
addq %r15,%r14
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz L$8x_reduce
leaq 64(%rbp),%rbp
xorq %rax,%rax
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae L$8x_no_tail
.byte 0x66
addq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movq 48+56+8(%rsp),%rbx
movl $8,%ecx
movq 0(%rbp),%rax
jmp L$8x_tail
.p2align 5
L$8x_tail:
mulq %rbx
addq %rax,%r8
movq 8(%rbp),%rax
movq %r8,(%rdi)
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
leaq 8(%rdi),%rdi
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq 48-16+8(%rsp,%rcx,8),%rbx
addq %rax,%r15
adcq $0,%rdx
addq %r15,%r14
movq 0(%rbp),%rax
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz L$8x_tail
leaq 64(%rbp),%rbp
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae L$8x_tail_done
movq 48+56+8(%rsp),%rbx
negq %rsi
movq 0(%rbp),%rax
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movl $8,%ecx
jmp L$8x_tail
.p2align 5
L$8x_tail_done:
xorq %rax,%rax
addq (%rdx),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
negq %rsi
L$8x_no_tail:
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq -8(%rbp),%rcx
xorq %rsi,%rsi
.byte 102,72,15,126,213
movq %r8,0(%rdi)
movq %r9,8(%rdi)
.byte 102,73,15,126,217
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi),%rdi
cmpq %rdx,%rdi
jb L$8x_reduction_loop
ret
.p2align 5
__bn_post4x_internal:
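// Branch-free conditional subtraction after the reduction: the modulus
// limbs are complemented and masked with %rax (0 or -1) before being
// added back, so np is subtracted only when the mask is set.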
movq 0(%rbp),%r12
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
.byte 102,72,15,126,207
negq %rax
.byte 102,72,15,126,206
sarq $3+2,%rcx
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqr4x_sub_entry
.p2align 4
L$sqr4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
L$sqr4x_sub_entry:
leaq 32(%rbp),%rbp
notq %r12
notq %r13
notq %r14
notq %r15
andq %rax,%r12
andq %rax,%r13
andq %rax,%r14
andq %rax,%r15
negq %r10
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
adcq 16(%rbx),%r14
adcq 24(%rbx),%r15
movq %r12,0(%rdi)
leaq 32(%rbx),%rbx
movq %r13,8(%rdi)
sbbq %r10,%r10
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz L$sqr4x_sub
movq %r9,%r10
negq %r9
ret
.p2align 5
bn_mulx4x_mont_gather5:
movq %rsp,%rax
L$mulx4x_enter:
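// MULX/ADX variant: MULX multiplies without touching flags, and
// ADCX/ADOX maintain two independent carry chains (CF and OF), letting
// the multiply and reduction columns interleave.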
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx4x_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$mulx4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$mulx4xsp_done
L$mulx4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$mulx4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
jmp L$mulx4x_page_walk_done
L$mulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
L$mulx4x_page_walk_done:
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$mulx4x_body:
call mulx4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mulx4x_epilogue:
ret
.p2align 5
mulx4x_internal:
movq %r9,8(%rsp)
movq %r9,%r10
negq %r9
shlq $5,%r9
negq %r10
leaq 128(%rdx,%r9,1),%r13
shrq $5+5,%r9
movd 8(%rax),%xmm5
subq $1,%r9
leaq L$inc(%rip),%rax
movq %r13,16+8(%rsp)
movq %r9,24+8(%rsp)
movq %rdi,56+8(%rsp)
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r10,1),%r10
leaq 128(%rdx),%rdi
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67
movdqa %xmm1,%xmm2
.byte 0x67
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
.byte 0x67
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%rdi),%xmm0
pand 80(%rdi),%xmm1
pand 96(%rdi),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%rdi),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%rdi),%xmm4
movdqa -112(%rdi),%xmm5
movdqa -96(%rdi),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%rdi),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%rdi),%xmm4
movdqa -48(%rdi),%xmm5
movdqa -32(%rdi),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%rdi),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%rdi),%xmm4
movdqa 16(%rdi),%xmm5
movdqa 32(%rdi),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%rdi),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
pxor %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
leaq 64+32+8(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r12
addq %rax,%r11
mulxq 16(%rsi),%rax,%r13
adcq %rax,%r12
adcq $0,%r13
mulxq 24(%rsi),%rax,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
xorq %rbp,%rbp
movq %r8,%rdx
movq %rdi,8+8(%rsp)
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp L$mulx4x_1st
.p2align 5
L$mulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_1st
movq 8(%rsp),%rax
adcq %rbp,%r15
leaq (%rsi,%rax,1),%rsi
addq %r15,%r14
movq 8+8(%rsp),%rdi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
jmp L$mulx4x_outer
.p2align 5
L$mulx4x_outer:
leaq 16-256(%rbx),%r10
pxor %xmm4,%xmm4
.byte 0x67,0x67
pxor %xmm5,%xmm5
movdqa -128(%rdi),%xmm0
movdqa -112(%rdi),%xmm1
movdqa -96(%rdi),%xmm2
pand 256(%r10),%xmm0
movdqa -80(%rdi),%xmm3
pand 272(%r10),%xmm1
por %xmm0,%xmm4
pand 288(%r10),%xmm2
por %xmm1,%xmm5
pand 304(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%rdi),%xmm0
movdqa -48(%rdi),%xmm1
movdqa -32(%rdi),%xmm2
pand 320(%r10),%xmm0
movdqa -16(%rdi),%xmm3
pand 336(%r10),%xmm1
por %xmm0,%xmm4
pand 352(%r10),%xmm2
por %xmm1,%xmm5
pand 368(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%rdi),%xmm0
movdqa 16(%rdi),%xmm1
movdqa 32(%rdi),%xmm2
pand 384(%r10),%xmm0
movdqa 48(%rdi),%xmm3
pand 400(%r10),%xmm1
por %xmm0,%xmm4
pand 416(%r10),%xmm2
por %xmm1,%xmm5
pand 432(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%rdi),%xmm0
movdqa 80(%rdi),%xmm1
movdqa 96(%rdi),%xmm2
pand 448(%r10),%xmm0
movdqa 112(%rdi),%xmm3
pand 464(%r10),%xmm1
por %xmm0,%xmm4
pand 480(%r10),%xmm2
por %xmm1,%xmm5
pand 496(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
movq %rbp,(%rbx)
leaq 32(%rbx,%rax,1),%rbx
mulxq 0(%rsi),%r8,%r11
xorq %rbp,%rbp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
mulxq 24(%rsi),%rdx,%r14
adoxq -16(%rbx),%r12
adcxq %rdx,%r13
leaq (%rcx,%rax,1),%rcx
leaq 32(%rsi),%rsi
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
adoxq %rbp,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
movq %r8,%rdx
xorq %rbp,%rbp
movq %rdi,8+8(%rsp)
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r12
movq %r11,-24(%rbx)
adoxq %rbp,%r15
movq %r12,-16(%rbx)
leaq 32(%rcx),%rcx
jmp L$mulx4x_inner
.p2align 5
L$mulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
movq %r11,-32(%rbx)
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
leaq 32(%rcx),%rcx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_inner
movq 0+8(%rsp),%rax
adcq %rbp,%r15
subq 0(%rbx),%rdi
movq 8+8(%rsp),%rdi
movq 16+8(%rsp),%r10
adcq %r15,%r14
leaq (%rsi,%rax,1),%rsi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
cmpq %r10,%rdi
jb L$mulx4x_outer
movq -8(%rcx),%r10
movq %rbp,%r8
movq (%rcx,%rax,1),%r12
leaq (%rcx,%rax,1),%rbp
movq %rax,%rcx
leaq (%rbx,%rax,1),%rdi
xorl %eax,%eax
xorq %r15,%r15
subq %r14,%r10
adcq %r15,%r15
orq %r15,%r8
sarq $3+2,%rcx
subq %r8,%rax
movq 56+8(%rsp),%rdx
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqrx4x_sub_entry
.p2align 5
bn_powerx5:
movq %rsp,%rax
L$powerx5_enter:
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$powerx5_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$pwrx_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$pwrx_sp_done
.p2align 5
L$pwrx_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$pwrx_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwrx_page_walk
jmp L$pwrx_page_walk_done
L$pwrx_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwrx_page_walk
L$pwrx_page_walk_done:
movq %r9,%r10
negq %r9
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$powerx5_body:
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
movq %r10,%r9
movq %rsi,%rdi
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq 40(%rsp),%rax
call mulx4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$powerx5_epilogue:
ret
.globl _bn_sqrx8x_internal
.private_extern _bn_sqrx8x_internal
.private_extern _bn_sqrx8x_internal
.p2align 5
_bn_sqrx8x_internal:
__bn_sqrx8x_internal:
_CET_ENDBR
leaq 48+8(%rsp),%rdi
leaq (%rsi,%r9,1),%rbp
movq %r9,0+8(%rsp)
movq %rbp,8+8(%rsp)
jmp L$sqr8x_zero_start
.p2align 5
.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
L$sqrx8x_zero:
.byte 0x3e
movdqa %xmm0,0(%rdi)
movdqa %xmm0,16(%rdi)
movdqa %xmm0,32(%rdi)
movdqa %xmm0,48(%rdi)
L$sqr8x_zero_start:
movdqa %xmm0,64(%rdi)
movdqa %xmm0,80(%rdi)
movdqa %xmm0,96(%rdi)
movdqa %xmm0,112(%rdi)
leaq 128(%rdi),%rdi
subq $64,%r9
jnz L$sqrx8x_zero
movq 0(%rsi),%rdx
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
leaq 48+8(%rsp),%rdi
xorq %rbp,%rbp
jmp L$sqrx8x_outer_loop
.p2align 5
L$sqrx8x_outer_loop:
mulxq 8(%rsi),%r8,%rax
adcxq %r9,%r8
adoxq %rax,%r10
mulxq 16(%rsi),%r9,%rax
adcxq %r10,%r9
adoxq %rax,%r11
.byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00
adcxq %r11,%r10
adoxq %rax,%r12
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00
adcxq %r12,%r11
adoxq %rax,%r13
mulxq 40(%rsi),%r12,%rax
adcxq %r13,%r12
adoxq %rax,%r14
mulxq 48(%rsi),%r13,%rax
adcxq %r14,%r13
adoxq %r15,%rax
mulxq 56(%rsi),%r14,%r15
movq 8(%rsi),%rdx
adcxq %rax,%r14
adoxq %rbp,%r15
adcq 64(%rdi),%r15
movq %r8,8(%rdi)
movq %r9,16(%rdi)
sbbq %rcx,%rcx
xorq %rbp,%rbp
mulxq 16(%rsi),%r8,%rbx
mulxq 24(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 32(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %rbx,%r11
.byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00
adcxq %r13,%r11
adoxq %r14,%r12
.byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00
movq 16(%rsi),%rdx
adcxq %rax,%r12
adoxq %rbx,%r13
adcxq %r15,%r13
adoxq %rbp,%r14
adcxq %rbp,%r14
movq %r8,24(%rdi)
movq %r9,32(%rdi)
mulxq 24(%rsi),%r8,%rbx
mulxq 32(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 40(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %r13,%r11
.byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00
.byte 0x3e
movq 24(%rsi),%rdx
adcxq %rbx,%r11
adoxq %rax,%r12
adcxq %r14,%r12
movq %r8,40(%rdi)
movq %r9,48(%rdi)
mulxq 32(%rsi),%r8,%rax
adoxq %rbp,%r13
adcxq %rbp,%r13
mulxq 40(%rsi),%r9,%rbx
adcxq %r10,%r8
adoxq %rax,%r9
mulxq 48(%rsi),%r10,%rax
adcxq %r11,%r9
adoxq %r12,%r10
mulxq 56(%rsi),%r11,%r12
movq 32(%rsi),%rdx
movq 40(%rsi),%r14
adcxq %rbx,%r10
adoxq %rax,%r11
movq 48(%rsi),%r15
adcxq %r13,%r11
adoxq %rbp,%r12
adcxq %rbp,%r12
movq %r8,56(%rdi)
movq %r9,64(%rdi)
mulxq %r14,%r9,%rax
movq 56(%rsi),%r8
adcxq %r10,%r9
mulxq %r15,%r10,%rbx
adoxq %rax,%r10
adcxq %r11,%r10
mulxq %r8,%r11,%rax
movq %r14,%rdx
adoxq %rbx,%r11
adcxq %r12,%r11
adcxq %rbp,%rax
mulxq %r15,%r14,%rbx
mulxq %r8,%r12,%r13
movq %r15,%rdx
leaq 64(%rsi),%rsi
adcxq %r14,%r11
adoxq %rbx,%r12
adcxq %rax,%r12
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %r8,%r8,%r14
adcxq %r8,%r13
adcxq %rbp,%r14
cmpq 8+8(%rsp),%rsi
je L$sqrx8x_outer_break
negq %rcx
movq $-8,%rcx
movq %rbp,%r15
movq 64(%rdi),%r8
adcxq 72(%rdi),%r9
adcxq 80(%rdi),%r10
adcxq 88(%rdi),%r11
adcq 96(%rdi),%r12
adcq 104(%rdi),%r13
adcq 112(%rdi),%r14
adcq 120(%rdi),%r15
leaq (%rsi),%rbp
leaq 128(%rdi),%rdi
sbbq %rax,%rax
movq -64(%rsi),%rdx
movq %rax,16+8(%rsp)
movq %rdi,24+8(%rsp)
xorl %eax,%eax
jmp L$sqrx8x_loop
.p2align 5
L$sqrx8x_loop:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
movq %rbx,(%rdi,%rcx,8)
movl $0,%ebx
adcxq %rax,%r13
adoxq %r15,%r14
.byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00
movq 8(%rsi,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rbx,%r15
adcxq %rbx,%r15
.byte 0x67
incq %rcx
jnz L$sqrx8x_loop
leaq 64(%rbp),%rbp
movq $-8,%rcx
cmpq 8+8(%rsp),%rbp
je L$sqrx8x_break
subq 16+8(%rsp),%rbx
.byte 0x66
movq -64(%rsi),%rdx
adcxq 0(%rdi),%r8
adcxq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
.byte 0x67
sbbq %rax,%rax
xorl %ebx,%ebx
movq %rax,16+8(%rsp)
jmp L$sqrx8x_loop
.p2align 5
L$sqrx8x_break:
xorq %rbp,%rbp
subq 16+8(%rsp),%rbx
adcxq %rbp,%r8
movq 24+8(%rsp),%rcx
adcxq %rbp,%r9
movq 0(%rsi),%rdx
adcq $0,%r10
movq %r8,0(%rdi)
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
cmpq %rcx,%rdi
je L$sqrx8x_outer_loop
movq %r9,8(%rdi)
movq 8(%rcx),%r9
movq %r10,16(%rdi)
movq 16(%rcx),%r10
movq %r11,24(%rdi)
movq 24(%rcx),%r11
movq %r12,32(%rdi)
movq 32(%rcx),%r12
movq %r13,40(%rdi)
movq 40(%rcx),%r13
movq %r14,48(%rdi)
movq 48(%rcx),%r14
movq %r15,56(%rdi)
movq 56(%rcx),%r15
movq %rcx,%rdi
jmp L$sqrx8x_outer_loop
.p2align 5
L$sqrx8x_outer_break:
movq %r9,72(%rdi)
.byte 102,72,15,126,217
movq %r10,80(%rdi)
movq %r11,88(%rdi)
movq %r12,96(%rdi)
movq %r13,104(%rdi)
movq %r14,112(%rdi)
leaq 48+8(%rsp),%rdi
movq (%rsi,%rcx,1),%rdx
movq 8(%rdi),%r11
xorq %r10,%r10
movq 0+8(%rsp),%r9
adoxq %r11,%r11
movq 16(%rdi),%r12
movq 24(%rdi),%r13
.p2align 5
L$sqrx4x_shift_n_add:
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
.byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00
.byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00
adoxq %r13,%r13
adcxq %r11,%rbx
movq 40(%rdi),%r11
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
movq 16(%rsi,%rcx,1),%rdx
movq 48(%rdi),%r12
adoxq %r11,%r11
adcxq %r13,%rbx
movq 56(%rdi),%r13
movq %rax,16(%rdi)
movq %rbx,24(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
movq 24(%rsi,%rcx,1),%rdx
leaq 32(%rcx),%rcx
movq 64(%rdi),%r10
adoxq %r13,%r13
adcxq %r11,%rbx
movq 72(%rdi),%r11
movq %rax,32(%rdi)
movq %rbx,40(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
jrcxz L$sqrx4x_shift_n_add_break
.byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00
adoxq %r11,%r11
adcxq %r13,%rbx
movq 80(%rdi),%r12
movq 88(%rdi),%r13
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
nop
jmp L$sqrx4x_shift_n_add
.p2align 5
L$sqrx4x_shift_n_add_break:
adcxq %r13,%rbx
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
.byte 102,72,15,126,213
__bn_sqrx8x_reduction:
xorl %eax,%eax
movq 32+8(%rsp),%rbx
movq 48+8(%rsp),%rdx
leaq -64(%rbp,%r9,1),%rcx
movq %rcx,0+8(%rsp)
movq %rdi,8+8(%rsp)
leaq 48+8(%rsp),%rdi
jmp L$sqrx8x_reduction_loop
.p2align 5
L$sqrx8x_reduction_loop:
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq %rdx,%r8
imulq %rbx,%rdx
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,24+8(%rsp)
leaq 64(%rdi),%rdi
xorq %rsi,%rsi
movq $-8,%rcx
jmp L$sqrx8x_reduce
.p2align 5
L$sqrx8x_reduce:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rbx,%rax
adoxq %r9,%r8
mulxq 8(%rbp),%rbx,%r9
adcxq %rbx,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rbx,%r10
adcxq %rbx,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rbx,%r11
adcxq %rbx,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00
movq %rdx,%rax
movq %r8,%rdx
adcxq %rbx,%r11
adoxq %r13,%r12
mulxq 32+8(%rsp),%rbx,%rdx
movq %rax,%rdx
movq %rax,64+48+8(%rsp,%rcx,8)
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq %rbx,%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
adcxq %rsi,%r15
.byte 0x67,0x67,0x67
incq %rcx
jnz L$sqrx8x_reduce
movq %rsi,%rax
cmpq 0+8(%rsp),%rbp
jae L$sqrx8x_no_tail
movq 48+8(%rsp),%rdx
addq 0(%rdi),%r8
leaq 64(%rbp),%rbp
movq $-8,%rcx
adcxq 8(%rdi),%r9
adcxq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp L$sqrx8x_tail
.p2align 5
L$sqrx8x_tail:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq 72+48+8(%rsp,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
movq %rbx,(%rdi,%rcx,8)
movq %r8,%rbx
adcxq %rsi,%r15
incq %rcx
jnz L$sqrx8x_tail
cmpq 0+8(%rsp),%rbp
jae L$sqrx8x_tail_done
subq 16+8(%rsp),%rsi
movq 48+8(%rsp),%rdx
leaq 64(%rbp),%rbp
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
subq $8,%rcx
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp L$sqrx8x_tail
.p2align 5
L$sqrx8x_tail_done:
xorq %rax,%rax
addq 24+8(%rsp),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
subq 16+8(%rsp),%rsi
L$sqrx8x_no_tail:
adcq 0(%rdi),%r8
.byte 102,72,15,126,217
adcq 8(%rdi),%r9
movq 56(%rbp),%rsi
.byte 102,72,15,126,213
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq 32+8(%rsp),%rbx
movq 64(%rdi,%rcx,1),%rdx
movq %r8,0(%rdi)
leaq 64(%rdi),%r8
movq %r9,8(%rdi)
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi,%rcx,1),%rdi
cmpq 8+8(%rsp),%r8
jb L$sqrx8x_reduction_loop
ret
.p2align 5
__bn_postx4x_internal:
movq 0(%rbp),%r12
movq %rcx,%r10
movq %rcx,%r9
negq %rax
sarq $3+2,%rcx
.byte 102,72,15,126,202
.byte 102,72,15,126,206
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqrx4x_sub_entry
.p2align 4
L$sqrx4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
L$sqrx4x_sub_entry:
andnq %rax,%r12,%r12
leaq 32(%rbp),%rbp
andnq %rax,%r13,%r13
andnq %rax,%r14,%r14
andnq %rax,%r15,%r15
negq %r8
adcq 0(%rdi),%r12
adcq 8(%rdi),%r13
adcq 16(%rdi),%r14
adcq 24(%rdi),%r15
movq %r12,0(%rdx)
leaq 32(%rdi),%rdi
movq %r13,8(%rdx)
sbbq %r8,%r8
movq %r14,16(%rdx)
movq %r15,24(%rdx)
leaq 32(%rdx),%rdx
incq %rcx
jnz L$sqrx4x_sub
negq %r9
ret
.globl _bn_scatter5
.private_extern _bn_scatter5
.p2align 4
_bn_scatter5:
_CET_ENDBR
cmpl $0,%esi
jz L$scatter_epilogue
leaq (%rdx,%rcx,8),%rdx
L$scatter:
movq (%rdi),%rax
leaq 8(%rdi),%rdi
movq %rax,(%rdx)
leaq 256(%rdx),%rdx
subl $1,%esi
jnz L$scatter
L$scatter_epilogue:
ret
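/*
 * bn_scatter5 interleaves the table: limb i of power p lands at byte
 * offset i*256 + p*8, so all 32 powers share every cache line rather
 * than each power owning contiguous memory. A hedged C sketch of the
 * same layout (names illustrative, not ring's API):
 *
 *   void scatter(uint64_t tbl[], const uint64_t *a, size_t n, size_t p) {
 *       for (size_t i = 0; i < n; i++)
 *           tbl[i * 32 + p] = a[i];  // 32 interleaved powers per limb row
 *   }
 */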
.globl _bn_gather5
.private_extern _bn_gather5
.p2align 5
_bn_gather5:
L$SEH_begin_bn_gather5:
_CET_ENDBR
.byte 0x4c,0x8d,0x14,0x24
.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
leaq L$inc(%rip),%rax
andq $-16,%rsp
movd %ecx,%xmm5
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 128(%rdx),%r11
leaq 128(%rsp),%rax
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-128(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-112(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-96(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-80(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-48(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-16(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,0(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,16(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,48(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,80(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,96(%rax)
movdqa %xmm4,%xmm2
movdqa %xmm3,112(%rax)
jmp L$gather
.p2align 5
L$gather:
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r11),%xmm0
movdqa -112(%r11),%xmm1
movdqa -96(%r11),%xmm2
pand -128(%rax),%xmm0
movdqa -80(%r11),%xmm3
pand -112(%rax),%xmm1
por %xmm0,%xmm4
pand -96(%rax),%xmm2
por %xmm1,%xmm5
pand -80(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r11),%xmm0
movdqa -48(%r11),%xmm1
movdqa -32(%r11),%xmm2
pand -64(%rax),%xmm0
movdqa -16(%r11),%xmm3
pand -48(%rax),%xmm1
por %xmm0,%xmm4
pand -32(%rax),%xmm2
por %xmm1,%xmm5
pand -16(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r11),%xmm0
movdqa 16(%r11),%xmm1
movdqa 32(%r11),%xmm2
pand 0(%rax),%xmm0
movdqa 48(%r11),%xmm3
pand 16(%rax),%xmm1
por %xmm0,%xmm4
pand 32(%rax),%xmm2
por %xmm1,%xmm5
pand 48(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r11),%xmm0
movdqa 80(%r11),%xmm1
movdqa 96(%r11),%xmm2
pand 64(%rax),%xmm0
movdqa 112(%r11),%xmm3
pand 80(%rax),%xmm1
por %xmm0,%xmm4
pand 96(%rax),%xmm2
por %xmm1,%xmm5
pand 112(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
leaq 256(%r11),%r11
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
movq %xmm0,(%rdi)
leaq 8(%rdi),%rdi
subl $1,%esi
jnz L$gather
leaq (%r10),%rsp
ret
L$SEH_end_bn_gather5:
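/*
 * The gather above never indexes the table with the secret window value:
 * L$inc seeds sixteen xmm masks covering indices 0..31, and each pass
 * reads all 32 table entries, keeping only the matching one via pand/por.
 * The branch-free selection in C (a sketch, not ring's API):
 *
 *   uint64_t gather(const uint64_t tbl[32], uint32_t secret) {
 *       uint64_t r = 0;
 *       for (uint32_t i = 0; i < 32; i++) {
 *           uint64_t mask = 0 - (uint64_t)(i == secret);
 *           r |= tbl[i] & mask;  // all-ones mask only when i == secret
 *       }
 *       return r;
 *   }
 */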
.section __DATA,__const
.p2align 6
L$inc:
.long 0,0, 1,1
.long 2,2, 2,2
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
#endif
==== chairq/First-choice | 25,499 bytes | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/vpaes-armv8-ios64.S ====
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include <ring-core/arm_arch.h>
.section __TEXT,__const
.align 7 // totally strategic alignment
_vpaes_consts:
Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Key schedule constants
//
Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.align 6
.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.align 4
_vpaes_encrypt_preheat:
adrp x10, Lk_inv@PAGE
add x10, x10, Lk_inv@PAGEOFF
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2
ret
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
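##
## Linear byte transforms here (the input/output transforms and the like)
## are evaluated nibble-wise: a 16-entry table for the low nibble and one
## for the high nibble, each applied with a single tbl. The per-byte
## pattern, as a C sketch (illustrative only):
##
##   uint8_t vperm_xform(uint8_t x, const uint8_t lo[16], const uint8_t hi[16]) {
##       return lo[x & 0x0f] ^ hi[x >> 4];  // the and/ushr + tbl + eor idiom
##   }
##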
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward@PAGE+16
add x11, x11, Lk_mc_forward@PAGEOFF+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b Lenc_entry
.align 4
Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
Lenc_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
.globl _vpaes_encrypt
.private_extern _vpaes_encrypt
.align 4
_vpaes_encrypt:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v7.16b}, [x0]
bl _vpaes_encrypt_preheat
bl _vpaes_encrypt_core
st1 {v0.16b}, [x1]
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
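##
## Called from C with the usual one-block shape (a sketch; the prototype
## is inferred from the register use above: x0 = in, x1 = out, x2 = key):
##
##   // void vpaes_encrypt(const uint8_t in[16], uint8_t out[16],
##   //                    const AES_KEY *key);
##   vpaes_encrypt(plaintext, ciphertext, &key);
##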
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward@PAGE+16
add x11, x11, Lk_mc_forward@PAGEOFF+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b Lenc_2x_entry
.align 4
Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
########################################################
## ##
## AES key schedule ##
## ##
########################################################
.align 4
_vpaes_key_preheat:
adrp x10, Lk_inv@PAGE
add x10, x10, Lk_inv@PAGEOFF
movi v16.16b, #0x5b // Lk_s63
adrp x11, Lk_sb1@PAGE
add x11, x11, Lk_sb1@PAGEOFF
movi v17.16b, #0x0f // Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt
adrp x10, Lk_dksd@PAGE
add x10, x10, Lk_dksd@PAGEOFF
ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1
adrp x11, Lk_mc_forward@PAGE
add x11, x11, Lk_mc_forward@PAGEOFF
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9
ld1 {v8.2d}, [x10] // Lk_rcon
ld1 {v9.2d}, [x11] // Lk_mc_forward[0]
ret
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, Lk_sr@PAGE // lea Lk_sr(%rip),%r10
add x10, x10, Lk_sr@PAGEOFF
add x8, x8, x10
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
cmp w1, #192 // cmp $192, %esi
b.hi Lschedule_256
b.eq Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
Lschedule_128:
mov x0, #10 // mov $10, %esi
Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
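##
## Concretely: four trips through Loop_schedule_192 at three keys per
## trip, plus the zeroth key stored by the caller path above, account for
## the 13 round keys AES-192 needs (the final trip's third key is written
## by Lschedule_mangle_last).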
.align 4
Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align 4
Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, Lk_deskew@PAGE // lea Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, Lk_deskew@PAGEOFF
cbnz w3, Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, Lk_opt@PAGE // lea Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, Lk_opt@PAGEOFF
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
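##
## In FIPS-197 terms this is temp = SubWord(RotWord(w[i-1])) xor Rcon for
## the lead word, and the smear below is the running xor
## w[i] = w[i-4] xor temp carried across all four words in-register.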
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.align 4
_vpaes_set_encrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // add $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
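##
## Call sketch (rounds is stored as bits/32 + 5, i.e. 9/11/13 for
## 128/192/256-bit keys; the encrypt core counts middle rounds and does
## the last round separately). Prototype assumed from this ABI:
##
##   // int vpaes_set_encrypt_key(const uint8_t *user_key, unsigned bits,
##   //                           AES_KEY *key);
##   AES_KEY key;
##   vpaes_set_encrypt_key(user_key, 128, &key);  // key.rounds becomes 9
##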
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.align 4
_vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
b.ls Lctr32_done
Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi Lctr32_loop
Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
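##
## Call sketch (note the length argument counts 16-byte blocks, not
## bytes; prototype assumed from the register use above):
##
##   // void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
##   //                                 size_t blocks, const AES_KEY *key,
##   //                                 const uint8_t ivec[16]);
##   vpaes_ctr32_encrypt_blocks(in, out, len / 16, &key, ivec);
##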
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
==== chairq/First-choice | 40,669 bytes | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/chacha-armv8-linux64.S ====
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#include <ring-core/arm_arch.h>
.hidden OPENSSL_armcap_P
.section .rodata
.align 5
.Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
.Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
.globl ChaCha20_ctr32
.hidden ChaCha20_ctr32
.type ChaCha20_ctr32,%function
.align 5
ChaCha20_ctr32:
AARCH64_VALID_CALL_TARGET
cbz x2,.Labort
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x5,:pg_hi21_nc:OPENSSL_armcap_P
#else
adrp x5,OPENSSL_armcap_P
#endif
cmp x2,#192
b.lo .Lshort
ldr w17,[x5,:lo12:OPENSSL_armcap_P]
tst w17,#ARMV7_NEON
b.ne ChaCha20_neon
.Lshort:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
.Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
.Loop:
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,.Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo .Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi .Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
.Labort:
ret
.align 4
.Ltail:
add x2,x2,#64
.Less_than_64:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
.Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,.Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_ctr32,.-ChaCha20_ctr32
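// The scalar path above interleaves four ChaCha quarter-rounds per trip
// through .Loop; each "eor ... ror #16/20/24/25" pair is a left-rotate
// by 16/12/8/7 written as a right-rotate by 32-n. One quarter-round in
// C for reference (standard ChaCha, not extra code in this file):
//
//   #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
//   #define QR(a, b, c, d)                       \
//       a += b; d ^= a; d = ROTL32(d, 16);       \
//       c += d; b ^= c; b = ROTL32(b, 12);       \
//       a += b; d ^= a; d = ROTL32(d, 8);        \
//       c += d; b ^= c; b = ROTL32(b, 7);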
.type ChaCha20_neon,%function
.align 5
ChaCha20_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs .L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
.Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
.Loop_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,.Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo .Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi .Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo .Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq .Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo .Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq .Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo .Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq .Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b .Last_neon
.Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b .Last_neon
.Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b .Last_neon
.align 4
.Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
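// XOR the final 1..63 bytes one at a time against the keystream copy
// spilled to the stack. x2 counts up from -remaining to zero; x0 was
// pre-decremented above because the store uses the already-incremented
// index.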
.Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,.Loop_tail_neon
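// Wipe the on-stack keystream buffer so no key material survives the call.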
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
.Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_neon,.-ChaCha20_neon
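//
// ChaCha20_512_neon retires 512 bytes (eight 64-byte blocks) per outer
// iteration: two blocks in general-purpose registers interleaved with
// six blocks held in v0-v23. Each instruction group below is one step of
// the standard ChaCha20 quarter round; as a minimal sketch (plain
// pseudocode, not part of this file):
//
//	a += b; d ^= a; d = rotl32(d,16);
//	c += d; b ^= c; b = rotl32(b,12);
//	a += b; d ^= a; d = rotl32(d, 8);
//	c += d; b ^= c; b = rotl32(b, 7);
//
// On the vector side the 16-bit rotate is a rev32 on .8h lanes and the
// 12/8/7-bit rotates are ushr/sli pairs; the ext instructions between
// rounds diagonalize and then restore the column layout.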
.type ChaCha20_512_neon,%function
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
.L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
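// Counter seeding: the first two blocks of each iteration come from the
// scalar path, so v27..v30 start the NEON states at offsets +2..+5
// (states five and six later add a further +4 via v31).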
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
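// x2 is pre-biased by one iteration ("not typo") so the subs at the top
// of the loop both advances the count and decides, via b.hs, whether a
// further full 512-byte pass fits; the bias is undone on exit.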
.Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
.Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,.Loop_upper_neon
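// Upper half done: the first scalar block has completed all 20 rounds
// (two double rounds per pass above) while the six vector states are
// halfway through theirs. Accumulate and emit the scalar block, then
// reload the scalar state for the lower half.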
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
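// Lower half: a second scalar block runs its full 20 rounds while the
// vector states complete their remaining 10.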
.Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,.Loop_lower_neon
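// Everything is done: reload the invariant and variable key-block parts
// from the stack and accumulate them into the scalar words and all six
// vector states before XORing with the input.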
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs .Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq .Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs .Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b .Loop_outer
.Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_512_neon,.-ChaCha20_512_neon
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
// ==== .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/x86_64-mont-macosx.S ====
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _bn_mul_mont
.private_extern _bn_mul_mont
.p2align 4
_bn_mul_mont:
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
testl $3,%r9d
jnz L$mul_enter
cmpl $8,%r9d
jb L$mul_enter
leaq _OPENSSL_ia32cap_P(%rip),%r11
movl 8(%r11),%r11d
cmpq %rsi,%rdx
jne L$mul4x_enter
testl $7,%r9d
jz L$sqr8x_enter
jmp L$mul4x_enter
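// Dispatch: word counts that are not a multiple of 4, or below 8, take
// the generic one-word loop; squaring inputs (%rdx == %rsi) with a
// multiple of 8 words go to the dedicated sqr8x path; everything else
// uses the 4x-unrolled multiplier, which may further divert to the
// MULX/ADX variant depending on the capability bits sampled above.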
.p2align 4
L$mul_enter:
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
negq %r9
movq %rsp,%r11
leaq -16(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
jmp L$mul_page_walk_done
.p2align 4
L$mul_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
L$mul_page_walk_done:
movq %rax,8(%rsp,%r9,8)
L$mul_body:
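// Classic word-by-word Montgomery multiplication. For each word b[i],
// m = (tp[0] + a[0]*b[i]) * n0 mod 2^64 is chosen so that adding m*n[]
// clears the low word, permitting a one-word shift. A minimal sketch
// (illustrative pseudocode, not this file's exact scheduling):
//
//	for i in 0..num:
//	    m  = (tp[0] + a[0]*b[i]) * n0   // mod 2^64
//	    tp = (tp + a*b[i] + m*n) >> 64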
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$1st_enter
.p2align 4
L$1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne L$1st
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp L$outer
.p2align 4
L$outer:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$inner_enter
.p2align 4
L$inner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$inner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne L$inner
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb L$outer
xorq %r14,%r14
movq (%rsp),%rax
movq %r9,%r15
.p2align 4
L$sub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsp,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz L$sub
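// The borrow from the trial subtraction picks, branch-free, between the
// reduced difference already in rp[] and the unreduced value still in
// tp[]: %rax/%rbx become complementary all-zeros/all-ones masks, and
// the copy loop clobbers the stack temporaries as it goes.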
sbbq $0,%rax
movq $-1,%rbx
xorq %rax,%rbx
xorq %r14,%r14
movq %r9,%r15
L$copy:
movq (%rdi,%r14,8),%rcx
movq (%rsp,%r14,8),%rdx
andq %rbx,%rcx
andq %rax,%rdx
movq %r9,(%rsp,%r14,8)
orq %rcx,%rdx
movq %rdx,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz L$copy
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul_epilogue:
ret
.p2align 4
bn_mul4x_mont:
movl %r9d,%r9d
movq %rsp,%rax
L$mul4x_enter:
andl $0x80100,%r11d
cmpl $0x80100,%r11d
je L$mulx4x_enter
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
negq %r9
movq %rsp,%r11
leaq -32(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul4x_page_walk
jmp L$mul4x_page_walk_done
L$mul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul4x_page_walk
L$mul4x_page_walk_done:
movq %rax,8(%rsp,%r9,8)
L$mul4x_body:
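// Same multiply-and-reduce as L$mul_body, unrolled four words per pass;
// the result pointer is parked on the stack because %rdi doubles as a
// carry word inside the loop.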
movq %rdi,16(%rsp,%r9,8)
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp L$1st4x
.p2align 4
L$1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb L$1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
leaq 1(%r14),%r14
.p2align 2
L$outer4x:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq (%rsp),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%rsp),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp L$inner4x
.p2align 4
L$inner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq 8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb L$inner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%rsp,%r9,8),%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
jb L$outer4x
movq 16(%rsp,%r9,8),%rdi
leaq -4(%r9),%r15
movq 0(%rsp),%rax
movq 8(%rsp),%rdx
shrq $2,%r15
leaq (%rsp),%rsi
xorq %r14,%r14
subq 0(%rcx),%rax
movq 16(%rsi),%rbx
movq 24(%rsi),%rbp
sbbq 8(%rcx),%rdx
L$sub4x:
movq %rax,0(%rdi,%r14,8)
movq %rdx,8(%rdi,%r14,8)
sbbq 16(%rcx,%r14,8),%rbx
movq 32(%rsi,%r14,8),%rax
movq 40(%rsi,%r14,8),%rdx
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
movq %rbp,24(%rdi,%r14,8)
sbbq 32(%rcx,%r14,8),%rax
movq 48(%rsi,%r14,8),%rbx
movq 56(%rsi,%r14,8),%rbp
sbbq 40(%rcx,%r14,8),%rdx
leaq 4(%r14),%r14
decq %r15
jnz L$sub4x
movq %rax,0(%rdi,%r14,8)
movq 32(%rsi,%r14,8),%rax
sbbq 16(%rcx,%r14,8),%rbx
movq %rdx,8(%rdi,%r14,8)
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
sbbq $0,%rax
movq %rbp,24(%rdi,%r14,8)
pxor %xmm0,%xmm0
.byte 102,72,15,110,224
pcmpeqd %xmm5,%xmm5
pshufd $0,%xmm4,%xmm4
movq %r9,%r15
pxor %xmm4,%xmm5
shrq $2,%r15
xorl %eax,%eax
jmp L$copy4x
.p2align 4
L$copy4x:
movdqa (%rsp,%rax,1),%xmm1
movdqu (%rdi,%rax,1),%xmm2
pand %xmm4,%xmm1
pand %xmm5,%xmm2
movdqa 16(%rsp,%rax,1),%xmm3
movdqa %xmm0,(%rsp,%rax,1)
por %xmm2,%xmm1
movdqu 16(%rdi,%rax,1),%xmm2
movdqu %xmm1,(%rdi,%rax,1)
pand %xmm4,%xmm3
pand %xmm5,%xmm2
movdqa %xmm0,16(%rsp,%rax,1)
por %xmm2,%xmm3
movdqu %xmm3,16(%rdi,%rax,1)
leaq 32(%rax),%rax
decq %r15
jnz L$copy4x
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul4x_epilogue:
ret
.p2align 5
bn_sqr8x_mont:
movq %rsp,%rax
L$sqr8x_enter:
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqr8x_prologue:
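// The squaring temporaries need 2*num words; %rsp is additionally
// positioned to avoid aliasing the input pointer modulo 4096.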
movl %r9d,%r10d
shll $3,%r9d
shlq $3+2,%r10
negq %r9
leaq -64(%rsp,%r9,2),%r11
movq %rsp,%rbp
movq (%r8),%r8
subq %rsi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$sqr8x_sp_alt
subq %r11,%rbp
leaq -64(%rbp,%r9,2),%rbp
jmp L$sqr8x_sp_done
.p2align 5
L$sqr8x_sp_alt:
leaq 4096-64(,%r9,2),%r10
leaq -64(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$sqr8x_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$sqr8x_page_walk
jmp L$sqr8x_page_walk_done
.p2align 4
L$sqr8x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$sqr8x_page_walk
L$sqr8x_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$sqr8x_body:
.byte 102,72,15,110,209
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,73,15,110,218
leaq _OPENSSL_ia32cap_P(%rip),%rax
movl 8(%rax),%eax
andl $0x80100,%eax
cmpl $0x80100,%eax
jne L$sqr8x_nox
call _bn_sqrx8x_internal
leaq (%r8,%rcx,1),%rbx
movq %rcx,%r9
movq %rcx,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp L$sqr8x_sub
.p2align 5
L$sqr8x_nox:
call _bn_sqr8x_internal
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp L$sqr8x_sub
.p2align 5
L$sqr8x_sub:
movq 0(%rbx),%r12
movq 8(%rbx),%r13
movq 16(%rbx),%r14
movq 24(%rbx),%r15
leaq 32(%rbx),%rbx
sbbq 0(%rbp),%r12
sbbq 8(%rbp),%r13
sbbq 16(%rbp),%r14
sbbq 24(%rbp),%r15
leaq 32(%rbp),%rbp
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz L$sqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp L$sqr8x_cond_copy
.p2align 5
L$sqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz L$sqr8x_cond_copy
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$sqr8x_epilogue:
ret
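// bn_mulx4x_mont requires BMI2/ADX: MULX leaves the flags untouched and
// ADCX/ADOX maintain two independent carry chains (CF and OF), so the
// multiplication and reduction columns can be interleaved without carry
// spills.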
.p2align 5
bn_mulx4x_mont:
movq %rsp,%rax
L$mulx4x_enter:
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx4x_prologue:
shll $3,%r9d
xorq %r10,%r10
subq %r9,%r10
movq (%r8),%r8
leaq -72(%rsp,%r10,1),%rbp
andq $-128,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
jmp L$mulx4x_page_walk_done
.p2align 4
L$mulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
L$mulx4x_page_walk_done:
leaq (%rdx,%r9,1),%r10
movq %r9,0(%rsp)
shrq $5,%r9
movq %r10,16(%rsp)
subq $1,%r9
movq %r8,24(%rsp)
movq %rdi,32(%rsp)
movq %rax,40(%rsp)
movq %r9,48(%rsp)
jmp L$mulx4x_body
.p2align 5
L$mulx4x_body:
leaq 8(%rdx),%rdi
movq (%rdx),%rdx
leaq 64+32(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r14
addq %rax,%r11
movq %rdi,8(%rsp)
mulxq 16(%rsi),%r12,%r13
adcq %r14,%r12
adcq $0,%r13
movq %r8,%rdi
imulq 24(%rsp),%r8
xorq %rbp,%rbp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%rdi
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00
movq 48(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp L$mulx4x_1st
.p2align 5
L$mulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_1st
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
addq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
jmp L$mulx4x_outer
.p2align 5
L$mulx4x_outer:
movq (%rdi),%rdx
leaq 8(%rdi),%rdi
subq %rax,%rsi
movq %r15,(%rbx)
leaq 64+32(%rsp),%rbx
subq %rax,%rcx
mulxq 0(%rsi),%r8,%r11
xorl %ebp,%ebp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
adoxq -16(%rbx),%r12
adcxq %rbp,%r13
adoxq %rbp,%r13
movq %rdi,8(%rsp)
movq %r8,%r15
imulq 24(%rsp),%r8
xorl %ebp,%ebp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
adcxq %rax,%r13
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
adoxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
leaq 32(%rcx),%rcx
adcxq %rax,%r12
adoxq %rbp,%r15
movq 48(%rsp),%rdi
movq %r12,-16(%rbx)
jmp L$mulx4x_inner
.p2align 5
L$mulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-32(%rbx)
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_inner
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
subq 0(%rbx),%rbp
adcq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
cmpq 16(%rsp),%rdi
jne L$mulx4x_outer
leaq 64(%rsp),%rbx
subq %rax,%rcx
negq %r15
movq %rax,%rdx
shrq $3+2,%rax
movq 32(%rsp),%rdi
jmp L$mulx4x_sub
.p2align 5
L$mulx4x_sub:
movq 0(%rbx),%r11
movq 8(%rbx),%r12
movq 16(%rbx),%r13
movq 24(%rbx),%r14
leaq 32(%rbx),%rbx
sbbq 0(%rcx),%r11
sbbq 8(%rcx),%r12
sbbq 16(%rcx),%r13
sbbq 24(%rcx),%r14
leaq 32(%rcx),%rcx
movq %r11,0(%rdi)
movq %r12,8(%rdi)
movq %r13,16(%rdi)
movq %r14,24(%rdi)
leaq 32(%rdi),%rdi
decq %rax
jnz L$mulx4x_sub
sbbq $0,%r15
leaq 64(%rsp),%rbx
subq %rdx,%rdi
.byte 102,73,15,110,207
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp L$mulx4x_cond_copy
.p2align 5
L$mulx4x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
subq $32,%rdx
jnz L$mulx4x_cond_copy
movq %rdx,(%rbx)
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mulx4x_epilogue:
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 4
#endif
// ==== .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/sha256-armv8-win64.S ====
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are an
// indication of some compiler "pathology"; most notably, code
// generated with -mgeneral-regs-only is significantly faster,
// and the gap is only 40-90%.
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
#endif
.text
.globl sha256_block_data_order
.def sha256_block_data_order
.type 32
.endef
.align 6
sha256_block_data_order:
AARCH64_VALID_CALL_TARGET
#ifndef __KERNEL__
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x16,:pg_hi21_nc:OPENSSL_armcap_P
#else
adrp x16,OPENSSL_armcap_P
#endif
ldr w16,[x16,:lo12:OPENSSL_armcap_P]
tst w16,#ARMV8_SHA256
b.ne Lv8_entry
#endif
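// No SHA-256 instruction-set extension: fall through to the scalar code.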
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,LK256
add x30,x30,:lo12:LK256
stp x0,x2,[x29,#96]
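// Per-round work, in scheduled order (sketch):
//	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i];  d += T1;
//	h  = T1 + Sigma0(a) + Maj(a,b,c)
// Maj is computed incrementally as ((b^c)&(a^b))^b, with the a^b of one
// round reused as the b^c of the next.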
Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
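// Rounds 16..63: the message schedule is extended in place as
//	X[i] += sigma0(X[i+1]) + X[i+9] + sigma1(X[i+14])
// with four of the sixteen live schedule words rotated through the
// stack slots at sp+0..sp+12.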
Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
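// The ldr above pre-loaded the next LK256 word into w19; the table ends
// with a zero terminator, so after round 63 w19 is 0 and the cbnz below
// falls through instead of looping.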
cbnz w19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind x30 to the start of LK256 (64 words + zero terminator)
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section .rodata
.align 6
LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.def sha256_block_armv8
.type 32
.endef
.align 6
sha256_block_armv8:
Lv8_entry:
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,LK256
add x3,x3,:lo12:LK256
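// The .long directives in this loop are hand-encoded Armv8 crypto
// instructions (sha256su0/sha256su1 for the message schedule,
// sha256h/sha256h2 for the compression rounds), emitted as raw opcodes so
// the file assembles even with toolchains lacking the crypto extension;
// the intended mnemonic is shown in each trailing comment.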
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ------------------------------------------------------------------------
// .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/p256-armv8-asm-win64.S
// ------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include "ring-core/arm_arch.h"
.section .rodata
.align 5
Lpoly:
.quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
LRR: // 2^512 mod P, precomputed for the NIST P-256 prime
.quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
Lone_mont:
.quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
Lone:
.quad 1,0,0,0
Lord:
.quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
LordK:
.quad 0xccd1c8aaee00bc4f
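// For reference: Lpoly is the NIST P-256 prime
// p = 2^256 - 2^224 + 2^192 + 2^96 - 1, LRR is the Montgomery factor
// 2^512 mod p, Lord is the group order n, and LordK appears to be the
// 64-bit Montgomery constant for n (n*LordK == -1 mod 2^64) consumed by
// ecp_nistz256_ord_mul_mont below.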
.byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
// void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
// const BN_ULONG x2[4]);
.globl ecp_nistz256_mul_mont
.def ecp_nistz256_mul_mont
.type 32
.endef
.align 4
ecp_nistz256_mul_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_mul_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_sqr_mont
.def ecp_nistz256_sqr_mont
.type 32
.endef
.align 4
ecp_nistz256_sqr_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sqr_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_neg
.def ecp_nistz256_neg
.type 32
.endef
.align 4
ecp_nistz256_neg:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x2,x1
mov x14,xzr // a = 0
mov x15,xzr
mov x16,xzr
mov x17,xzr
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sub_from
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
// to x4-x7, and b[0] in x3
.def __ecp_nistz256_mul_mont
.type 32
.endef
.align 4
__ecp_nistz256_mul_mont:
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x11,x7,x3
ldr x3,[x2,#8] // b[1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adc x19,xzr,x11
mov x20,xzr
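// Sketch of the reduction identity used below: p == -1 (mod 2^64), so the
// Montgomery quotient digit is acc[0] itself and acc += acc[0]*p clears
// the low limb. Limb by limb that means: drop acc[0], add acc[0]<<96
// (the x8/x9 shifts), and add acc[0]*0xffffffff00000001 at the top, which
// the subs/sbc pair forms as x11:x10 = acc[0]*2^64 - acc[0]*2^32 + acc[0].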
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(1+1)] // b[1+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(2+1)] // b[2+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
// last reduction
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adcs x17,x19,x11
adc x19,x20,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
// to x4-x7
.def __ecp_nistz256_sqr_mont
.type 32
.endef
.align 4
__ecp_nistz256_sqr_mont:
// |  |  |  |  |  |a1*a0|  |
// |  |  |  |  |a2*a0|  |  |
// |  |a3*a2|a3*a0|  |  |  |
// |  |  |  |a2*a1|  |  |  |
// |  |  |a3*a1|  |  |  |  |
// *|  |  |  |  |  |  |  | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax denotes acc[x] in the code below
//
// "can't overflow" below mark carrying into high part of
// multiplication result, which can't overflow, because it
// can never be all ones.
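// In C-like pseudocode (illustrative only): every cross product
// a[i]*a[j], i > j, occurs twice in a^2, so the code sums each once,
// doubles the whole strip ("acc[1-6]*=2" below), then adds the diagonal
// squares:
//   cross = sum_{i>j} a[i]*a[j] << 64*(i+j)
//   a^2   = 2*cross + sum_i (a[i]*a[i]) << 128*i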
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x2,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
lsl x8,x14,#32
adcs x1,x1,x11
lsr x9,x14,#32
adc x2,x2,x7
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adc x17,x11,xzr // can't overflow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x2
adc x19,xzr,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to
// x4-x7 and x8-x11. This is done because it's used in multiple
// contexts, e.g. in multiplication by 2 and 3...
.def __ecp_nistz256_add_to
.type 32
.endef
.align 4
__ecp_nistz256_add_to:
adds x14,x14,x8 // ret = a+b
adcs x15,x15,x9
adcs x16,x16,x10
adcs x17,x17,x11
adc x1,xzr,xzr // zap x1
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x1,xzr // did subtraction borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.def __ecp_nistz256_sub_from
.type 32
.endef
.align 4
__ecp_nistz256_sub_from:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x14,x8 // ret = a-b
sbcs x15,x15,x9
sbcs x16,x16,x10
sbcs x17,x17,x11
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x14,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.def __ecp_nistz256_sub_morf
.type 32
.endef
.align 4
__ecp_nistz256_sub_morf:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x8,x14 // ret = b-a
sbcs x15,x9,x15
sbcs x16,x10,x16
sbcs x17,x11,x17
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x14,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.def __ecp_nistz256_div_by_2
.type 32
.endef
.align 4
__ecp_nistz256_div_by_2:
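// Halving mod p: a/2 = a>>1 if a is even, else (a+p)>>1. The adds/adcs
// chain speculatively forms a+p (x1 catches the carry into bit 256),
// tst/csel then picks a or a+p branchlessly, and the lsr/orr chain
// performs the 257-bit right shift.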
subs x8,x14,#1 // adds x8,x14,#-1 // tmp = a+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adcs x11,x17,x13
adc x1,xzr,xzr // zap x1
tst x14,#1 // is a even?
csel x14,x14,x8,eq // ret = even ? a : a+modulus
csel x15,x15,x9,eq
csel x16,x16,x10,eq
csel x17,x17,x11,eq
csel x1,xzr,x1,eq
lsr x14,x14,#1 // ret >>= 1
orr x14,x14,x15,lsl#63
lsr x15,x15,#1
orr x15,x15,x16,lsl#63
lsr x16,x16,#1
orr x16,x16,x17,lsl#63
lsr x17,x17,#1
stp x14,x15,[x0]
orr x17,x17,x1,lsl#63
stp x16,x17,[x0,#16]
ret
.globl ecp_nistz256_point_double
.def ecp_nistz256_point_double
.type 32
.endef
.align 5
ecp_nistz256_point_double:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
sub sp,sp,#32*4
Ldouble_shortcut:
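// Jacobian doubling; reconstructed from the p256_* comments below:
//   S = (2*Y1)^2, M = 3*(X1 + Z1^2)*(X1 - Z1^2), Z3 = 2*Y1*Z1,
//   S = S*X1, X3 = M^2 - 2*S, Y3 = M*(S - X3) - 8*Y1^4.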
ldp x14,x15,[x1,#32]
mov x21,x0
ldp x16,x17,[x1,#48]
mov x22,x1
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
mov x8,x14
ldr x13,[x13,#24]
mov x9,x15
ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[x22,#64+16]
add x0,sp,#0
bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y);
add x0,sp,#64
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
ldp x8,x9,[x22]
ldp x10,x11,[x22,#16]
mov x4,x14 // put Zsqr aside for p256_sub
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x);
add x2,x22,#0
mov x14,x4 // restore Zsqr
mov x15,x5
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x16,x6
mov x17,x7
ldp x6,x7,[sp,#0+16]
add x0,sp,#64
bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
add x0,sp,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
ldr x3,[x22,#32]
ldp x4,x5,[x22,#64]
ldp x6,x7,[x22,#64+16]
add x2,x22,#32
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#0+16]
add x0,x21,#64
bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0);
add x0,sp,#96
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
ldr x3,[sp,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x0,x21,#32
bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
add x2,sp,#64
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
mov x8,x14 // duplicate M
mov x9,x15
mov x10,x16
mov x11,x17
mov x4,x14 // put M aside
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to
mov x8,x4 // restore M
mov x9,x5
ldr x3,[x22] // forward load for p256_mul_mont
mov x10,x6
ldp x4,x5,[sp,#0]
mov x11,x7
ldp x6,x7,[sp,#0+16]
bl __ecp_nistz256_add_to // p256_mul_by_3(M, M);
add x2,x22,#0
add x0,sp,#0
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#32+16]
add x0,sp,#96
bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S);
add x0,x21,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
add x2,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
add x2,sp,#0
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
ldr x3,[sp,#32]
mov x4,x14 // copy S
mov x5,x15
mov x6,x16
mov x7,x17
add x2,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
add x2,x21,#32
add x0,x21,#32
bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl ecp_nistz256_point_add
.def ecp_nistz256_point_add
.type 32
.endef
.align 5
ecp_nistz256_point_add:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#32*12
ldp x4,x5,[x2,#64] // in2_z
ldp x6,x7,[x2,#64+16]
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
orr x8,x4,x5
orr x10,x6,x7
orr x25,x8,x10
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
ldp x4,x5,[x22,#64] // in1_z
ldp x6,x7,[x22,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x2,x23,#64
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x22,#64
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#32]
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x2,x22,#32
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#352]
ldp x6,x7,[sp,#352+16]
add x2,x23,#32
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,sp,#320
ldr x3,[sp,#192] // forward load for p256_mul_mont
ldp x4,x5,[x22]
ldp x6,x7,[x22,#16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x26,x14,x16 // ~is_equal(S1,S2)
add x2,sp,#192
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
ldr x3,[sp,#128]
ldp x4,x5,[x23]
ldp x6,x7,[x23,#16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
add x2,sp,#256
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x14,x14,x16 // ~is_equal(U1,U2)
mvn x27,x24 // -1/0 -> 0/-1
mvn x28,x25 // -1/0 -> 0/-1
orr x14,x14,x27
orr x14,x14,x28
orr x14,x14,x26
cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))
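// Falling through here means every mask above was zero: U1 == U2 and
// S1 == S2 with both inputs finite, i.e. the same point twice, so the
// addition formula would degenerate; tail-call the doubling code instead.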
Ladd_double:
mov x1,x22
mov x0,x21
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
add sp,sp,#256 // #256 = 32*(12-4): shrink point_add's 12-slot frame to point_double's 4-slot frame
b Ldouble_shortcut
.align 4
Ladd_proceed:
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#64]
ldp x6,x7,[sp,#64+16]
add x2,x23,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
ldr x3,[sp,#96]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,sp,#96
add x0,sp,#224
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[sp,#128]
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#128
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#192
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#224
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#288
ldr x3,[sp,#224] // forward load for p256_mul_mont
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,sp,#224
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#160
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#352
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
Ladd_done:
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl ecp_nistz256_point_add_affine
.def ecp_nistz256_point_add_affine
.type 32
.endef
.align 5
ecp_nistz256_point_add_affine:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
sub sp,sp,#32*10
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
ldp x4,x5,[x1,#64] // in1_z
ldp x6,x7,[x1,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
ldp x14,x15,[x2] // in2_x
ldp x16,x17,[x2,#16]
ldp x8,x9,[x2,#32] // in2_y
ldp x10,x11,[x2,#48]
orr x14,x14,x15
orr x16,x16,x17
orr x8,x8,x9
orr x10,x10,x11
orr x14,x14,x16
orr x8,x8,x10
orr x25,x14,x8
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
mov x4,x14
mov x5,x15
mov x6,x16
mov x7,x17
ldr x3,[x23]
add x2,x23,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
add x2,x22,#0
ldr x3,[x22,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
add x2,x22,#64
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#160]
ldp x6,x7,[sp,#160+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x23,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,x22,#32
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#192
bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
add x0,sp,#224
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x0,sp,#288
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,sp,#160
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[x22]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,x22,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#224
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#288
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#256
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#96
ldr x3,[x22,#32] // forward load for p256_mul_mont
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,x22,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
ldr x3,[sp,#192]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#192
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#128
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
adrp x23,Lone_mont-64
add x23,x23,:lo12:Lone_mont-64
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x29,x30,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
// uint64_t b[4]);
.globl ecp_nistz256_ord_mul_mont
.def ecp_nistz256_ord_mul_mont
.type 32
.endef
.align 4
ecp_nistz256_ord_mul_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord
add x23,x23,:lo12:Lord
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
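// Montgomery multiplication modulo the group order. Unlike Lpoly, Lord
// has no p == -1 (mod 2^64) shortcut, so each step derives the quotient
// digit x24 = acc[0]*LordK (mod 2^64) and folds x24*ord into the
// accumulator, using shifts for the two special upper limbs of ord and
// mul/umulh for the lower ones.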
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x19,x7,x3
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts of multiplication
adcs x16,x16,x9
adcs x17,x17,x10
adc x19,x19,xzr
mov x20,xzr
ldr x3,[x2,#8*1] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*2] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*3] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
lsl x8,x24,#32 // last reduction
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
// uint64_t rep);
.globl ecp_nistz256_ord_sqr_mont
.def ecp_nistz256_ord_sqr_mont
.type 32
.endef
.align 4
ecp_nistz256_ord_sqr_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord
add x23,x23,:lo12:Lord
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
b Loop_ord_sqr
.align 4
Loop_ord_sqr:
sub x2,x2,#1
////////////////////////////////////////////////////////////////
// |  |  |  |  |  |a1*a0|  |
// |  |  |  |  |a2*a0|  |  |
// |  |a3*a2|a3*a0|  |  |  |
// |  |  |  |a2*a1|  |  |  |
// |  |  |a3*a1|  |  |  |  |
// *|  |  |  |  |  |  |  | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax denotes acc[x] in the code below
//
// "can't overflow" below mark carrying into high part of
// multiplication result, which can't overflow, because it
// can never be all ones.
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x3,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
mul x24,x14,x23
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
adcs x1,x1,x11
adc x3,x3,x7
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
mul x24,x14,x23
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x3
adc x19,xzr,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x5,x15,x9,lo
csel x6,x16,x10,lo
csel x7,x17,x11,lo
cbnz x2,Loop_ord_sqr
stp x4,x5,[x0]
stp x6,x7,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index);
.globl ecp_nistz256_select_w5
.def ecp_nistz256_select_w5
.type 32
.endef
.align 4
ecp_nistz256_select_w5:
AARCH64_VALID_CALL_TARGET
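// Constant-time select: all 16 table entries are loaded unconditionally
// and a mask built from (w9 == w2) decides which one is kept in v16-v21,
// so the memory access pattern is independent of the secret index.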
// x10 := x0
// w9 := 0; loop counter and incremented internal index
mov x10, x0
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
movi v20.16b, #0
movi v21.16b, #0
Lselect_w5_loop:
// Loop 16 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// continue loading ...
ld1 {v26.2d, v27.2d}, [x1],#32
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v21] := (Mask == all 1s)? [v22-v27] : [v16-v21]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
bit v20.16b, v26.16b, v3.16b
bit v21.16b, v27.16b, v3.16b
// If bit #4 is 0 (i.e. idx_ctr < 16) loop back
tbz w9, #4, Lselect_w5_loop
// Write [v16-v21] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64
st1 {v20.2d, v21.2d}, [x10]
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index);
.globl ecp_nistz256_select_w7
.def ecp_nistz256_select_w7
.type 32
.endef
.align 4
ecp_nistz256_select_w7:
AARCH64_VALID_CALL_TARGET
// w9 := 0; loop counter and incremented internal index
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
Lselect_w7_loop:
// Loop 64 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
// If bit #6 is 0 (i.e. idx_ctr < 64) loop back
tbz w9, #6, Lselect_w7_loop
// Write [v16-v19] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0]
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ------------------------------------------------------------------------
// .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/ghash-x86_64-elf.S
// ------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.extern OPENSSL_ia32cap_P
.hidden OPENSSL_ia32cap_P
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,@function
.align 16
gcm_init_clmul:
.cfi_startproc
_CET_ENDBR
.L_init_clmul:
movdqu (%rsi),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand .L0x1c2_polynomial(%rip),%xmm5
pxor %xmm5,%xmm2
pshufd $78,%xmm2,%xmm6
movdqa %xmm2,%xmm0
pxor %xmm2,%xmm6
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
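# The .byte runs below are hand-encoded SSE instructions, kept as raw
# opcodes for assemblers without PCLMULQDQ/SSSE3 support; e.g.
# 102,15,58,68,194,0 is 66 0F 3A 44 C2 00 = pclmulqdq $0x00,%xmm2,%xmm0,
# and 102,15,58,15,227,8 encodes palignr $8,%xmm3,%xmm4.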
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,0(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%rdi)
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm5
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm5,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm5,%xmm3
movdqu %xmm5,48(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,64(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,80(%rdi)
ret
.cfi_endproc
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,@function
.align 16
gcm_gmult_clmul:
.cfi_startproc
_CET_ENDBR
.L_gmult_clmul:
movdqu (%rdi),%xmm0
movdqa .Lbswap_mask(%rip),%xmm5
movdqu (%rsi),%xmm2
movdqu 32(%rsi),%xmm4
.byte 102,15,56,0,197
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.byte 102,15,56,0,197
movdqu %xmm0,(%rdi)
ret
.cfi_endproc
.size gcm_gmult_clmul,.-gcm_gmult_clmul
.globl gcm_ghash_clmul
.hidden gcm_ghash_clmul
.type gcm_ghash_clmul,@function
.align 32
gcm_ghash_clmul:
.cfi_startproc
_CET_ENDBR
.L_ghash_clmul:
movdqa .Lbswap_mask(%rip),%xmm10
movdqu (%rdi),%xmm0
movdqu (%rsi),%xmm2
movdqu 32(%rsi),%xmm7
.byte 102,65,15,56,0,194
subq $0x10,%rcx
jz .Lodd_tail
movdqu 16(%rsi),%xmm6
leaq OPENSSL_ia32cap_P(%rip),%rax
movl 4(%rax),%eax
cmpq $0x30,%rcx
jb .Lskip4x
andl $71303168,%eax
cmpl $4194304,%eax
je .Lskip4x
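# Gate for the 4x aggregated loop: %eax holds OPENSSL_ia32cap_P[1];
# 71303168 = 1<<22|1<<26 isolates the MOVBE and XSAVE CPUID bits, and
# 4194304 = 1<<22 matches MOVBE-without-XSAVE (low-power Atom-class
# cores), for which the unaggregated path below is assumed to be faster.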
subq $0x30,%rcx
movq $0xA040608020C0E000,%rax
movdqu 48(%rsi),%xmm14
movdqu 64(%rsi),%xmm15
movdqu 48(%rdx),%xmm3
movdqu 32(%rdx),%xmm11
.byte 102,65,15,56,0,218
.byte 102,69,15,56,0,218
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm11,%xmm12
.byte 102,68,15,58,68,222,0
.byte 102,68,15,58,68,238,17
.byte 102,68,15,58,68,231,16
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
xorps %xmm12,%xmm4
movdqu 16(%rdx),%xmm11
movdqu 0(%rdx),%xmm8
.byte 102,69,15,56,0,218
.byte 102,69,15,56,0,194
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm8,%xmm0
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,69,15,58,68,238,17
.byte 102,68,15,58,68,231,0
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jc .Ltail4x
jmp .Lmod4_loop
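//
// Main 4-blocks-per-iteration loop: the reduction of the previous
// accumulator is interleaved with the multiplications for the next four
// blocks to hide PCLMULQDQ latency.
//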
.align 32
.Lmod4_loop:
.byte 102,65,15,58,68,199,0
xorps %xmm12,%xmm4
movdqu 48(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,65,15,58,68,207,17
xorps %xmm3,%xmm0
movdqu 32(%rdx),%xmm3
movdqa %xmm11,%xmm13
.byte 102,68,15,58,68,199,16
pshufd $78,%xmm11,%xmm12
xorps %xmm5,%xmm1
pxor %xmm11,%xmm12
.byte 102,65,15,56,0,218
movups 32(%rsi),%xmm7
xorps %xmm4,%xmm8
.byte 102,68,15,58,68,218,0
pshufd $78,%xmm3,%xmm4
pxor %xmm0,%xmm8
movdqa %xmm3,%xmm5
pxor %xmm1,%xmm8
pxor %xmm3,%xmm4
movdqa %xmm8,%xmm9
.byte 102,68,15,58,68,234,17
pslldq $8,%xmm8
psrldq $8,%xmm9
pxor %xmm8,%xmm0
movdqa .L7_mask(%rip),%xmm8
pxor %xmm9,%xmm1
.byte 102,76,15,110,200
pand %xmm0,%xmm8
.byte 102,69,15,56,0,200
pxor %xmm0,%xmm9
.byte 102,68,15,58,68,231,0
psllq $57,%xmm9
movdqa %xmm9,%xmm8
pslldq $8,%xmm9
.byte 102,15,58,68,222,0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
movdqu 0(%rdx),%xmm8
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,238,17
xorps %xmm11,%xmm3
movdqu 16(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,15,58,68,231,16
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
.byte 102,69,15,56,0,194
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
movdqa %xmm11,%xmm13
pxor %xmm12,%xmm4
pshufd $78,%xmm11,%xmm12
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm1
.byte 102,69,15,58,68,238,17
xorps %xmm11,%xmm3
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,68,15,58,68,231,0
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jnc .Lmod4_loop
.Ltail4x:
.byte 102,65,15,58,68,199,0
.byte 102,65,15,58,68,207,17
.byte 102,68,15,58,68,199,16
xorps %xmm12,%xmm4
xorps %xmm3,%xmm0
xorps %xmm5,%xmm1
pxor %xmm0,%xmm1
pxor %xmm4,%xmm8
pxor %xmm1,%xmm8
pxor %xmm0,%xmm1
movdqa %xmm8,%xmm9
psrldq $8,%xmm8
pslldq $8,%xmm9
pxor %xmm8,%xmm1
pxor %xmm9,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
addq $0x40,%rcx
jz .Ldone
movdqu 32(%rsi),%xmm7
subq $0x10,%rcx
jz .Lodd_tail
.Lskip4x:
movdqu (%rdx),%xmm8
movdqu 16(%rdx),%xmm3
.byte 102,69,15,56,0,194
.byte 102,65,15,56,0,218
pxor %xmm8,%xmm0
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
leaq 32(%rdx),%rdx
nop
subq $0x20,%rcx
jbe .Leven_tail
nop
jmp .Lmod_loop
.align 32
.Lmod_loop:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
movdqu (%rdx),%xmm9
pxor %xmm0,%xmm8
.byte 102,69,15,56,0,202
movdqu 16(%rdx),%xmm3
pxor %xmm1,%xmm8
pxor %xmm9,%xmm1
pxor %xmm8,%xmm4
.byte 102,65,15,56,0,218
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm3,%xmm5
movdqa %xmm0,%xmm9
movdqa %xmm0,%xmm8
psllq $5,%xmm0
pxor %xmm0,%xmm8
.byte 102,15,58,68,218,0
psllq $1,%xmm0
pxor %xmm8,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm8
pslldq $8,%xmm0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pshufd $78,%xmm5,%xmm4
pxor %xmm8,%xmm1
pxor %xmm5,%xmm4
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,234,17
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
pxor %xmm9,%xmm0
leaq 32(%rdx),%rdx
psrlq $1,%xmm0
.byte 102,15,58,68,231,0
pxor %xmm1,%xmm0
subq $0x20,%rcx
ja .Lmod_loop
.Leven_tail:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
pxor %xmm0,%xmm8
pxor %xmm1,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testq %rcx,%rcx
jnz .Ldone
.Lodd_tail:
movdqu (%rdx),%xmm8
.byte 102,69,15,56,0,194
pxor %xmm8,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,223,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.Ldone:
.byte 102,65,15,56,0,194
movdqu %xmm0,(%rdi)
ret
.cfi_endproc
.size gcm_ghash_clmul,.-gcm_ghash_clmul
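//
// gcm_init_avx(Htable, H) expands the hash key at (%rsi) into powers
// H^1..H^8 at (%rdi); each loop trip stores two consecutive powers plus
// their xored halves, the layout the 8x AVX hashing loop expects.
//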
.globl gcm_init_avx
.hidden gcm_init_avx
.type gcm_init_avx,@function
.align 32
gcm_init_avx:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqu (%rsi),%xmm2
vpshufd $78,%xmm2,%xmm2
vpshufd $255,%xmm2,%xmm4
vpsrlq $63,%xmm2,%xmm3
vpsllq $1,%xmm2,%xmm2
vpxor %xmm5,%xmm5,%xmm5
vpcmpgtd %xmm4,%xmm5,%xmm5
vpslldq $8,%xmm3,%xmm3
vpor %xmm3,%xmm2,%xmm2
vpand .L0x1c2_polynomial(%rip),%xmm5,%xmm5
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm2,%xmm2,%xmm6
vmovdqa %xmm2,%xmm0
vpxor %xmm2,%xmm6,%xmm6
movq $4,%r10
jmp .Linit_start_avx
.align 32
.Linit_loop_avx:
vpalignr $8,%xmm3,%xmm4,%xmm5
vmovdqu %xmm5,-16(%rdi)
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
.Linit_start_avx:
vmovdqa %xmm0,%xmm5
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
vpshufd $78,%xmm5,%xmm3
vpshufd $78,%xmm0,%xmm4
vpxor %xmm5,%xmm3,%xmm3
vmovdqu %xmm5,0(%rdi)
vpxor %xmm0,%xmm4,%xmm4
vmovdqu %xmm0,16(%rdi)
leaq 48(%rdi),%rdi
subq $1,%r10
jnz .Linit_loop_avx
vpalignr $8,%xmm4,%xmm3,%xmm5
vmovdqu %xmm5,-16(%rdi)
vzeroupper
ret
.cfi_endproc
.size gcm_init_avx,.-gcm_init_avx
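//
// gcm_ghash_avx(Xi, Htable, inp, len): GHASH over eight blocks per
// iteration; the reduction uses the 0x1c2 polynomial constant addressed
// via %r10, and .Lshort_avx handles remainders of fewer than eight
// blocks one block at a time.
//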
.globl gcm_ghash_avx
.hidden gcm_ghash_avx
.type gcm_ghash_avx,@function
.align 32
gcm_ghash_avx:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqu (%rdi),%xmm10
leaq .L0x1c2_polynomial(%rip),%r10
leaq 64(%rsi),%rsi
vmovdqu .Lbswap_mask(%rip),%xmm13
vpshufb %xmm13,%xmm10,%xmm10
cmpq $0x80,%rcx
jb .Lshort_avx
subq $0x80,%rcx
vmovdqu 112(%rdx),%xmm14
vmovdqu 0-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vmovdqu 32-64(%rsi),%xmm7
vpunpckhqdq %xmm14,%xmm14,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm14,%xmm9,%xmm9
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 80(%rdx),%xmm14
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 48-64(%rsi),%xmm6
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 64(%rdx),%xmm15
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 48(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 32(%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 16(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu (%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
leaq 128(%rdx),%rdx
cmpq $0x80,%rcx
jb .Ltail_avx
vpxor %xmm10,%xmm15,%xmm15
subq $0x80,%rcx
jmp .Loop8x_avx
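//
// Eight products (by H^8..H) are accumulated per trip, with the
// two-step reduction of the previous result folded between the
// multiplications.
//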
.align 32
.Loop8x_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 112(%rdx),%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpxor %xmm15,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11
vmovdqu 0-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12
vmovdqu 32-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm3,%xmm10,%xmm10
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vxorps %xmm4,%xmm11,%xmm11
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm5,%xmm12,%xmm12
vxorps %xmm15,%xmm8,%xmm8
vmovdqu 80(%rdx),%xmm14
vpxor %xmm10,%xmm12,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm11,%xmm12,%xmm12
vpslldq $8,%xmm12,%xmm9
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vpsrldq $8,%xmm12,%xmm12
vpxor %xmm9,%xmm10,%xmm10
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vxorps %xmm12,%xmm11,%xmm11
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 64(%rdx),%xmm15
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vxorps %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vmovdqu 48(%rdx),%xmm14
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 32(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vxorps %xmm12,%xmm10,%xmm10
vmovdqu 16(%rdx),%xmm14
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vxorps %xmm11,%xmm12,%xmm12
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu (%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm12,%xmm15,%xmm15
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
vpxor %xmm10,%xmm15,%xmm15
leaq 128(%rdx),%rdx
subq $0x80,%rcx
jnc .Loop8x_avx
addq $0x80,%rcx
jmp .Ltail_no_xor_avx
.align 32
.Lshort_avx:
vmovdqu -16(%rdx,%rcx,1),%xmm14
leaq (%rdx,%rcx,1),%rdx
vmovdqu 0-64(%rsi),%xmm6
vmovdqu 32-64(%rsi),%xmm7
vpshufb %xmm13,%xmm14,%xmm15
vmovdqa %xmm0,%xmm3
vmovdqa %xmm1,%xmm4
vmovdqa %xmm2,%xmm5
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -32(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -48(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 80-64(%rsi),%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -64(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -80(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 96-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 128-64(%rsi),%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -96(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -112(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 144-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovq 184-64(%rsi),%xmm7
subq $0x10,%rcx
jmp .Ltail_avx
.align 32
.Ltail_avx:
vpxor %xmm10,%xmm15,%xmm15
.Ltail_no_xor_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu (%r10),%xmm12
vpxor %xmm0,%xmm3,%xmm10
vpxor %xmm1,%xmm4,%xmm11
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm10,%xmm5,%xmm5
vpxor %xmm11,%xmm5,%xmm5
vpslldq $8,%xmm5,%xmm9
vpsrldq $8,%xmm5,%xmm5
vpxor %xmm9,%xmm10,%xmm10
vpxor %xmm5,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm11,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
cmpq $0,%rcx
jne .Lshort_avx
vpshufb %xmm13,%xmm10,%xmm10
vmovdqu %xmm10,(%rdi)
vzeroupper
ret
.cfi_endproc
.size gcm_ghash_avx,.-gcm_ghash_avx
.section .rodata
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
.long 7,0,7,0
.align 64
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.text
#endif
chairq/First-choice | 12,580 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/vpaes-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
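//
// Vector-permutation AES (vpaes): S-box lookups are performed with
// PSHUFB against 16-byte constant tables instead of secret-indexed
// memory loads, so the implementation runs in constant time.
// _vpaes_preheat (further down) caches the tables in %xmm9-%xmm15;
// the ".byte 102,15,56,0,..." sequences encode PSHUFB.
//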
.type _vpaes_encrypt_core,@function
.align 16
_vpaes_encrypt_core:
.cfi_startproc
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa .Lk_ipt(%rip),%xmm2
pandn %xmm0,%xmm1
movdqu (%r9),%xmm5
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa .Lk_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
addq $16,%r9
pxor %xmm2,%xmm0
leaq .Lk_mc_backward(%rip),%r10
jmp .Lenc_entry
.align 16
.Lenc_loop:
movdqa %xmm13,%xmm4
movdqa %xmm12,%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
.Lenc_entry:
movdqa %xmm9,%xmm1
movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,232
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm10,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz .Lenc_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.cfi_endproc
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
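//
// _vpaes_encrypt_core_2x: the same round structure as above, carrying
// two blocks (%xmm0 and %xmm6) through the key schedule in parallel;
// used by the CTR loop below.
//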
.type _vpaes_encrypt_core_2x,@function
.align 16
_vpaes_encrypt_core_2x:
.cfi_startproc
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa .Lk_ipt(%rip),%xmm2
movdqa %xmm2,%xmm8
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
movdqu (%r9),%xmm5
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,208
.byte 102,68,15,56,0,198
movdqa .Lk_ipt+16(%rip),%xmm0
movdqa %xmm0,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,247
pxor %xmm5,%xmm2
pxor %xmm5,%xmm8
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
leaq .Lk_mc_backward(%rip),%r10
jmp .Lenc2x_entry
.align 16
.Lenc2x_loop:
movdqa .Lk_sb1(%rip),%xmm4
movdqa .Lk_sb1+16(%rip),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
movdqa .Lk_sb2(%rip),%xmm5
movdqa %xmm5,%xmm13
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
.byte 102,69,15,56,0,232
movdqa (%r11,%r10,1),%xmm4
movdqa .Lk_sb2+16(%rip),%xmm2
movdqa %xmm2,%xmm8
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm0,%xmm3
movdqa %xmm6,%xmm11
pxor %xmm5,%xmm2
pxor %xmm13,%xmm8
.byte 102,15,56,0,193
.byte 102,15,56,0,241
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
.byte 102,15,56,0,220
.byte 102,68,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
pxor %xmm6,%xmm11
.byte 102,15,56,0,193
.byte 102,15,56,0,241
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
pxor %xmm11,%xmm6
.Lenc2x_entry:
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa .Lk_inv+16(%rip),%xmm5
movdqa %xmm5,%xmm13
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,232
.byte 102,68,15,56,0,238
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm1,%xmm0
pxor %xmm7,%xmm6
.byte 102,15,56,0,217
.byte 102,68,15,56,0,223
movdqa %xmm10,%xmm4
movdqa %xmm10,%xmm12
pxor %xmm5,%xmm3
pxor %xmm13,%xmm11
.byte 102,15,56,0,224
.byte 102,68,15,56,0,230
movdqa %xmm10,%xmm2
movdqa %xmm10,%xmm8
pxor %xmm5,%xmm4
pxor %xmm13,%xmm12
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm0,%xmm2
pxor %xmm6,%xmm8
.byte 102,15,56,0,220
.byte 102,69,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
pxor %xmm7,%xmm11
jnz .Lenc2x_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
ret
.cfi_endproc
.size _vpaes_encrypt_core_2x,.-_vpaes_encrypt_core_2x
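//
// _vpaes_schedule_core expands the user key. 128-bit keys take ten
// passes of _vpaes_schedule_round; the 256-bit path alternates full and
// low rounds over the two key halves. No 192-bit path is present in
// this build.
//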
.type _vpaes_schedule_core,@function
.align 16
_vpaes_schedule_core:
.cfi_startproc
call _vpaes_preheat
movdqa .Lk_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
movdqa %xmm0,%xmm3
leaq .Lk_ipt(%rip),%r11
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
leaq .Lk_sr(%rip),%r10
movdqu %xmm0,(%rdx)
.Lschedule_go:
cmpl $192,%esi
ja .Lschedule_256
.Lschedule_128:
movl $10,%esi
.Loop_schedule_128:
call _vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
call _vpaes_schedule_mangle
jmp .Loop_schedule_128
.align 16
.Lschedule_256:
movdqu 16(%rdi),%xmm0
call _vpaes_schedule_transform
movl $7,%esi
.Loop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
call _vpaes_schedule_mangle
pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round
movdqa %xmm5,%xmm7
jmp .Loop_schedule_256
.align 16
.Lschedule_mangle_last:
leaq .Lk_deskew(%rip),%r11
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,193
leaq .Lk_opt(%rip),%r11
addq $32,%rdx
.Lschedule_mangle_last_dec:
addq $-16,%rdx
pxor .Lk_s63(%rip),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%rdx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.cfi_endproc
.size _vpaes_schedule_core,.-_vpaes_schedule_core
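//
// _vpaes_schedule_round: one full round of the schedule. It rotates the
// rcon register and mixes it in (the ".byte 102,...,58,15,..." sequences
// encode PALIGNR), then applies SubBytes via the inversion tables.
// The 256-bit schedule enters at _vpaes_schedule_low_round to skip the
// rcon step.
//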
.type _vpaes_schedule_round,@function
.align 16
_vpaes_schedule_round:
.cfi_startproc
pxor %xmm1,%xmm1
.byte 102,65,15,58,15,200,15
.byte 102,69,15,58,15,192,15
pxor %xmm1,%xmm7
pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1
_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor .Lk_s63(%rip),%xmm7
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa %xmm13,%xmm4
.byte 102,15,56,0,226
movdqa %xmm12,%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.cfi_endproc
.size _vpaes_schedule_round,.-_vpaes_schedule_round
.type _vpaes_schedule_transform,@function
.align 16
_vpaes_schedule_transform:
.cfi_startproc
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa (%r11),%xmm2
.byte 102,15,56,0,208
movdqa 16(%r11),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.cfi_endproc
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
.type _vpaes_schedule_mangle,@function
.align 16
_vpaes_schedule_mangle:
.cfi_startproc
movdqa %xmm0,%xmm4
movdqa .Lk_mc_forward(%rip),%xmm5
addq $16,%rdx
pxor .Lk_s63(%rip),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.Lschedule_mangle_both:
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
addq $-16,%r8
andq $0x30,%r8
movdqu %xmm3,(%rdx)
ret
.cfi_endproc
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
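//
// vpaes_set_encrypt_key(userKey, bits, key) stores bits/32 + 5 in
// key->rounds (9 for AES-128, 13 for AES-256; the last round is applied
// outside the core loop, giving the usual 10/14 total) and then runs
// the schedule core.
//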
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,@function
.align 16
vpaes_set_encrypt_key:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
.hidden BORINGSSL_function_hit
movb $1,BORINGSSL_function_hit+5(%rip)
#endif
movl %esi,%eax
shrl $5,%eax
addl $5,%eax
movl %eax,240(%rdx)
movl $0,%ecx
movl $0x30,%r8d
call _vpaes_schedule_core
xorl %eax,%eax
ret
.cfi_endproc
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,@function
.align 16
vpaes_encrypt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
.hidden BORINGSSL_function_hit
movb $1,BORINGSSL_function_hit+4(%rip)
#endif
movdqu (%rdi),%xmm0
call _vpaes_preheat
call _vpaes_encrypt_core
movdqu %xmm0,(%rsi)
ret
.cfi_endproc
.size vpaes_encrypt,.-vpaes_encrypt
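//
// vpaes_ctr32_encrypt_blocks(in, out, blocks, key, ivec): CTR mode.
// The counter word (the last 32 bits of the block) is byte-swapped via
// .Lrev_ctr so that PADDD can increment it; an odd leading block goes
// through the 1x core, then the loop encrypts two counter blocks per
// iteration with _vpaes_encrypt_core_2x and XORs them with the input.
//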
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,@function
.align 16
vpaes_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
xchgq %rcx,%rdx
testq %rcx,%rcx
jz .Lctr32_abort
movdqu (%r8),%xmm0
movdqa .Lctr_add_one(%rip),%xmm8
subq %rdi,%rsi
call _vpaes_preheat
movdqa %xmm0,%xmm6
pshufb .Lrev_ctr(%rip),%xmm6
testq $1,%rcx
jz .Lctr32_prep_loop
movdqu (%rdi),%xmm7
call _vpaes_encrypt_core
pxor %xmm7,%xmm0
paddd %xmm8,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
subq $1,%rcx
leaq 16(%rdi),%rdi
jz .Lctr32_done
.Lctr32_prep_loop:
movdqa %xmm6,%xmm14
movdqa %xmm6,%xmm15
paddd %xmm8,%xmm15
.Lctr32_loop:
movdqa .Lrev_ctr(%rip),%xmm1
movdqa %xmm14,%xmm0
movdqa %xmm15,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
call _vpaes_encrypt_core_2x
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa .Lctr_add_two(%rip),%xmm3
pxor %xmm1,%xmm0
pxor %xmm2,%xmm6
paddd %xmm3,%xmm14
paddd %xmm3,%xmm15
movdqu %xmm0,(%rsi,%rdi,1)
movdqu %xmm6,16(%rsi,%rdi,1)
subq $2,%rcx
leaq 32(%rdi),%rdi
jnz .Lctr32_loop
.Lctr32_done:
.Lctr32_abort:
ret
.cfi_endproc
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
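//
// _vpaes_preheat caches the hot constants from _vpaes_consts below in
// %xmm9-%xmm15 so the encryption loops never reload them from memory.
//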
.type _vpaes_preheat,@function
.align 16
_vpaes_preheat:
.cfi_startproc
leaq .Lk_s0F(%rip),%r10
movdqa -32(%r10),%xmm10
movdqa -16(%r10),%xmm11
movdqa 0(%r10),%xmm9
movdqa 48(%r10),%xmm13
movdqa 64(%r10),%xmm12
movdqa 80(%r10),%xmm15
movdqa 96(%r10),%xmm14
ret
.cfi_endproc
.size _vpaes_preheat,.-_vpaes_preheat
.type _vpaes_consts,@object
.section .rodata
.align 64
_vpaes_consts:
.Lk_inv:
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_s0F:
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
.Lk_ipt:
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sb1:
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.Lk_sb2:
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.Lk_sbo:
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_mc_forward:
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
.Lk_rcon:
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_s63:
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
.Lk_opt:
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.Lrev_ctr:
.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
.Lctr_add_one:
.quad 0x0000000000000000, 0x0000000100000000
.Lctr_add_two:
.quad 0x0000000000000000, 0x0000000200000000
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 64
.size _vpaes_consts,.-_vpaes_consts
.text
#endif
chairq/First-choice | 49,073 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/sha512-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//                     SHA256-hw    SHA256(*)      SHA512
// Apple A7            1.97         10.5 (+33%)    6.73 (-1%(**))
// Cortex-A53          2.38         15.5 (+115%)   10.0 (+150%(***))
// Cortex-A57          2.31         11.6 (+86%)    7.51 (+260%(***))
// Denver              2.01         10.5 (+26%)    6.70 (+8%)
// X-Gene                           20.0 (+100%)   12.8 (+300%(***))
// Mongoose            2.36         13.0 (+50%)    8.36 (+33%)
// Kryo                1.92         17.4 (+30%)    11.2 (+8%)
//
// (*)   Software SHA256 results are of lesser relevance, presented
//       mostly for informational purposes.
// (**)  The result is a trade-off: it's possible to improve it by
//       10% (or by 1 cycle per round), but at the cost of 20% loss
//       on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are an
//       indication of some compiler "pathology"; most notably, code
//       generated with -mgeneral-regs-only is significantly faster
//       and the gap is only 40-90%.
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
#endif
.text
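//
// sha512_block_data_order(state, input, num): x0 = the 8x64-bit hash
// state, x1 = input, x2 = number of 128-byte blocks. If OPENSSL_armcap_P
// advertises ARMV8_SHA512 the hardware path (Lv8_entry below) is taken;
// otherwise this fully unrolled scalar implementation runs.
//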
.globl sha512_block_data_order
.def sha512_block_data_order
.type 32
.endef
.align 6
sha512_block_data_order:
AARCH64_VALID_CALL_TARGET
#ifndef __KERNEL__
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x16,:pg_hi21_nc:OPENSSL_armcap_P
#else
adrp x16,OPENSSL_armcap_P
#endif
ldr w16,[x16,:lo12:OPENSSL_armcap_P]
tst w16,#ARMV8_SHA512
b.ne Lv8_entry
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,LK512
add x30,x30,:lo12:LK512
stp x0,x2,[x29,#96]
Loop:
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section .rodata
.align 6
LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
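// (The .byte string above decodes to "SHA512 block transform for
// ARMv8, CRYPTOGAMS by <appro@openssl.org>".)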
.align 2
.align 2
.text
#ifndef __KERNEL__
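// sha512_block_armv8 is the hardware-accelerated path (it requires
// the ARMv8.2 SHA-512 extension). Per the AAPCS64 layout visible in
// the code below: x0 = state (eight 64-bit words), x1 = input,
// x2 = number of 128-byte blocks to process.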
.def sha512_block_armv8
.type 32
.endef
.align 6
sha512_block_armv8:
Lv8_entry:
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,LK512
add x3,x3,:lo12:LK512
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b Loop_hw
.align 4
Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
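// Input for the next iteration is prefetched below while this block
// is still being hashed; on the final block the csel above rewinds
// x1 by 128 so those speculative loads re-read the current block
// instead of running past the end of the input.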
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
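// Each group like the one above retires two SHA-512 rounds: the
// sha512su0/sha512su1 pair extends the message schedule while the
// sha512h/sha512h2 pair updates the working state. The instructions
// are emitted as raw .long opcodes so the file still assembles with
// toolchains that lack the ARMv8.2 SHA-512 extension mnemonics.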
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ---- .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/ghashv8-armx-ios64.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include <ring-core/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.globl _gcm_init_clmul
.private_extern _gcm_init_clmul
.align 4
_gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
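// Precomputes the GHASH key table: Htable[0] holds the "twisted"
// hash key H (shifted into the representation the reduction
// expects); Htable[2], Htable[3] and Htable[5] hold H^2, H^3 and
// H^4 for the aggregated 2x/4x loops; and Htable[1]/Htable[4] pack
// the Karatsuba pre-processed halves (lo XOR hi) of the adjacent
// powers. v19 = 0xe1 << 57 is the usual 0xc2... constant encoding
// the GHASH reduction polynomial.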
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.globl _gcm_gmult_clmul
.private_extern _gcm_gmult_clmul
.align 4
_gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
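// One GF(2^128) multiply costs three 64x64 carry-less multiplies
// via Karatsuba: lo*lo, hi*hi and (lo+hi)*(lo+hi), with the middle
// term recovered by XOR. The two pmull-by-v19 steps below then
// reduce the 256-bit product modulo the GHASH polynomial in two
// phases.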
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.globl _gcm_ghash_clmul
.private_extern _gcm_ghash_clmul
.align 4
_gcm_ghash_clmul:
AARCH64_VALID_CALL_TARGET
cmp x3,#64
b.hs Lgcm_ghash_v8_4x
ld1 {v0.2d},[x0] //load [rotated] Xi
//"[rotated]" means that
//loaded value would have
//to be rotated in order to
//make it appear as in
//algorithm specification
subs x3,x3,#32 //see if x3 is 32 or larger
mov x12,#16 //x12 post-increments the
//input pointer; the loop is
//modulo-scheduled, so x12 is
//zeroed just in time to avoid
//reading past inp[len]. As a
//result the last block(s) are
//loaded twice, but the second
//copy is never processed
ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v22.2d},[x1]
csel x12,xzr,x12,eq //is it time to zero x12?
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi
ld1 {v16.2d},[x2],#16 //load [rotated] I[0]
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
#ifndef __AARCH64EB__
rev64 v16.16b,v16.16b
rev64 v0.16b,v0.16b
#endif
ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0]
b.lo Lodd_tail_v8 //x3 was less than 32
ld1 {v17.2d},[x2],x12 //load [rotated] I[1]
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v7.16b,v17.16b,v17.16b,#8
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
pmull2 v6.1q,v20.2d,v7.2d
b Loop_mod2x_v8
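// Main 2x loop: each iteration consumes 32 bytes, multiplying the
// accumulator by H^2 while folding in the next block multiplied by
// H (standard aggregated reduction), so only one reduction is paid
// per two blocks.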
.align 4
Loop_mod2x_v8:
ext v18.16b,v3.16b,v3.16b,#8
subs x3,x3,#32 //is there more data?
pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
csel x12,xzr,x12,lo //is it time to zero x12?
pmull v5.1q,v21.1d,v17.1d
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
eor v0.16b,v0.16b,v4.16b //accumulate
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
eor v2.16b,v2.16b,v6.16b
csel x12,xzr,x12,eq //is it time to zero x12?
eor v1.16b,v1.16b,v5.16b
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3]
#ifndef __AARCH64EB__
rev64 v16.16b,v16.16b
#endif
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v7.16b,v17.16b,v17.16b,#8
ext v3.16b,v16.16b,v16.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v3.16b,v3.16b,v18.16b
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
eor v3.16b,v3.16b,v0.16b
pmull2 v6.1q,v20.2d,v7.2d
b.hs Loop_mod2x_v8 //there was at least 32 more bytes
eor v2.16b,v2.16b,v18.16b
ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b
adds x3,x3,#32 //re-construct x3
eor v0.16b,v0.16b,v2.16b //re-construct v0.16b
b.eq Ldone_v8 //is x3 zero?
Lodd_tail_v8:
ext v18.16b,v0.16b,v0.16b,#8
eor v3.16b,v3.16b,v0.16b //inp^=Xi
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
Ldone_v8:
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.align 4
gcm_ghash_v8_4x:
Lgcm_ghash_v8_4x:
ld1 {v0.2d},[x0] //load [rotated] Xi
ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
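// 4x path: four input blocks are processed per iteration against
// H^4..H (see the "H·Ii+3" ... "H^4·(Xi+Ii)" annotations below),
// again deferring the reduction so it is paid once per 64 bytes.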
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
ext v25.16b,v7.16b,v7.16b,#8
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
pmull2 v31.1q,v20.2d,v25.2d
pmull v30.1q,v21.1d,v7.1d
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#128
b.lo Ltail4x
b Loop4x
.align 4
Loop4x:
eor v16.16b,v4.16b,v0.16b
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
ext v3.16b,v16.16b,v16.16b,#8
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
ext v25.16b,v7.16b,v7.16b,#8
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
ext v24.16b,v6.16b,v6.16b,#8
eor v1.16b,v1.16b,v30.16b
ext v23.16b,v5.16b,v5.16b,#8
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
eor v1.16b,v1.16b,v17.16b
pmull2 v31.1q,v20.2d,v25.2d
eor v1.16b,v1.16b,v18.16b
pmull v30.1q,v21.1d,v7.1d
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
eor v0.16b,v1.16b,v18.16b
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v18.16b,v18.16b,v2.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v0.16b,v0.16b,v18.16b
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#64
b.hs Loop4x
Ltail4x:
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
adds x3,x3,#64
b.eq Ldone4x
cmp x3,#32
b.lo Lone
b.eq Ltwo
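// 48, 32 or 16 bytes remain: each tail folds the pending products
// with one more multiply pass, finishing with H^3, H^2 or H
// respectively, before the shared final reduction at Ldone4x.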
Lthree:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d,v6.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v29.1q,v20.1d,v24.1d //H·Ii+2
eor v6.16b,v6.16b,v24.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
pmull2 v31.1q,v20.2d,v24.2d
pmull v30.1q,v21.1d,v6.1d
eor v0.16b,v0.16b,v18.16b
pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1
eor v5.16b,v5.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull2 v23.1q,v22.2d,v23.2d
eor v16.16b,v4.16b,v0.16b
pmull2 v5.1q,v21.2d,v5.2d
ext v3.16b,v16.16b,v16.16b,#8
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v26.2d,v3.2d
pmull v1.1q,v27.1d,v16.1d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Ltwo:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull v29.1q,v20.1d,v23.1d //H·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull2 v31.1q,v20.2d,v23.2d
pmull v30.1q,v21.1d,v5.1d
pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v22.2d,v3.2d
pmull2 v1.1q,v21.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Lone:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v20.1d,v3.1d
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v20.2d,v3.2d
pmull v1.1q,v21.1d,v16.1d
Ldone4x:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
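// (The .byte string above decodes to "GHASH for ARMv8, CRYPTOGAMS
// by <appro@openssl.org>".)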
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// ---- .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/chacha-x86-elf.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl ChaCha20_ctr32
.hidden ChaCha20_ctr32
.type ChaCha20_ctr32,@function
.align 16
ChaCha20_ctr32:
.L_ChaCha20_ctr32_begin:
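// cdecl arguments, once the four register pushes below have
// executed, sit at 20(%esp) and up: 20 = out, 24 = in, 28 = in_len,
// 32 = key (32 bytes), 36 = counter (16 bytes: a 32-bit block
// counter followed by the 96-bit nonce).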
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
cmpl 28(%esp),%eax
je .L000no_data
call .Lpic_point
.Lpic_point:
popl %eax
leal OPENSSL_ia32cap_P-.Lpic_point(%eax),%ebp
testl $16777216,(%ebp)
jz .L001x86
testl $512,4(%ebp)
jz .L001x86
jmp .Lssse3_shortcut
.L001x86:
movl 32(%esp),%esi
movl 36(%esp),%edi
subl $132,%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
movl %eax,80(%esp)
movl %ebx,84(%esp)
movl %ecx,88(%esp)
movl %edx,92(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
movl %eax,96(%esp)
movl %ebx,100(%esp)
movl %ecx,104(%esp)
movl %edx,108(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
subl $1,%eax
movl %eax,112(%esp)
movl %ebx,116(%esp)
movl %ecx,120(%esp)
movl %edx,124(%esp)
jmp .L002entry
.align 16
.L003outer_loop:
movl %ebx,156(%esp)
movl %eax,152(%esp)
movl %ecx,160(%esp)
.L002entry:
movl $1634760805,%eax
movl $857760878,4(%esp)
movl $2036477234,8(%esp)
movl $1797285236,12(%esp)
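// 1634760805 etc. are the ChaCha "expand 32-byte k" sigma
// constants: 0x61707865 "expa", 0x3320646e "nd 3", 0x79622d32
// "2-by", 0x6b206574 "te k".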
movl 84(%esp),%ebx
movl 88(%esp),%ebp
movl 104(%esp),%ecx
movl 108(%esp),%esi
movl 116(%esp),%edx
movl 120(%esp),%edi
movl %ebx,20(%esp)
movl %ebp,24(%esp)
movl %ecx,40(%esp)
movl %esi,44(%esp)
movl %edx,52(%esp)
movl %edi,56(%esp)
movl 92(%esp),%ebx
movl 124(%esp),%edi
movl 112(%esp),%edx
movl 80(%esp),%ebp
movl 96(%esp),%ecx
movl 100(%esp),%esi
addl $1,%edx
movl %ebx,28(%esp)
movl %edi,60(%esp)
movl %edx,112(%esp)
movl $10,%ebx
jmp .L004loop
.align 16
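// Ten iterations of this loop, each covering a column round and a
// diagonal round, give the full 20 ChaCha rounds; every
// quarter-round is the usual add/xor/rotate-by-16,12,8,7 sequence
// on 32-bit words.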
.L004loop:
addl %ebp,%eax
movl %ebx,128(%esp)
movl %ebp,%ebx
xorl %eax,%edx
roll $16,%edx
addl %edx,%ecx
xorl %ecx,%ebx
movl 52(%esp),%edi
roll $12,%ebx
movl 20(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,48(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,32(%esp)
roll $16,%edi
movl %ebx,16(%esp)
addl %edi,%esi
movl 40(%esp),%ecx
xorl %esi,%ebp
movl 56(%esp),%edx
roll $12,%ebp
movl 24(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,52(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,36(%esp)
roll $16,%edx
movl %ebp,20(%esp)
addl %edx,%ecx
movl 44(%esp),%esi
xorl %ecx,%ebx
movl 60(%esp),%edi
roll $12,%ebx
movl 28(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,56(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,24(%esp)
addl %edi,%esi
xorl %esi,%ebp
roll $12,%ebp
movl 20(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,%edx
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
roll $16,%edx
movl %ebp,28(%esp)
addl %edx,%ecx
xorl %ecx,%ebx
movl 48(%esp),%edi
roll $12,%ebx
movl 24(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,60(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,40(%esp)
roll $16,%edi
movl %ebx,20(%esp)
addl %edi,%esi
movl 32(%esp),%ecx
xorl %esi,%ebp
movl 52(%esp),%edx
roll $12,%ebp
movl 28(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,48(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,44(%esp)
roll $16,%edx
movl %ebp,24(%esp)
addl %edx,%ecx
movl 36(%esp),%esi
xorl %ecx,%ebx
movl 56(%esp),%edi
roll $12,%ebx
movl 16(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,52(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,28(%esp)
addl %edi,%esi
xorl %esi,%ebp
movl 48(%esp),%edx
roll $12,%ebp
movl 128(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,56(%esp)
xorl %esi,%ebp
roll $7,%ebp
decl %ebx
jnz .L004loop
movl 160(%esp),%ebx
addl $1634760805,%eax
addl 80(%esp),%ebp
addl 96(%esp),%ecx
addl 100(%esp),%esi
cmpl $64,%ebx
jb .L005tail
movl 156(%esp),%ebx
addl 112(%esp),%edx
addl 120(%esp),%edi
xorl (%ebx),%eax
xorl 16(%ebx),%ebp
movl %eax,(%esp)
movl 152(%esp),%eax
xorl 32(%ebx),%ecx
xorl 36(%ebx),%esi
xorl 48(%ebx),%edx
xorl 56(%ebx),%edi
movl %ebp,16(%eax)
movl %ecx,32(%eax)
movl %esi,36(%eax)
movl %edx,48(%eax)
movl %edi,56(%eax)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
xorl 4(%ebx),%ebp
xorl 8(%ebx),%ecx
xorl 12(%ebx),%esi
xorl 20(%ebx),%edx
xorl 24(%ebx),%edi
movl %ebp,4(%eax)
movl %ecx,8(%eax)
movl %esi,12(%eax)
movl %edx,20(%eax)
movl %edi,24(%eax)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
xorl 28(%ebx),%ebp
xorl 40(%ebx),%ecx
xorl 44(%ebx),%esi
xorl 52(%ebx),%edx
xorl 60(%ebx),%edi
leal 64(%ebx),%ebx
movl %ebp,28(%eax)
movl (%esp),%ebp
movl %ecx,40(%eax)
movl 160(%esp),%ecx
movl %esi,44(%eax)
movl %edx,52(%eax)
movl %edi,60(%eax)
movl %ebp,(%eax)
leal 64(%eax),%eax
subl $64,%ecx
jnz .L003outer_loop
jmp .L006done
.L005tail:
addl 112(%esp),%edx
addl 120(%esp),%edi
movl %eax,(%esp)
movl %ebp,16(%esp)
movl %ecx,32(%esp)
movl %esi,36(%esp)
movl %edx,48(%esp)
movl %edi,56(%esp)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
movl %ebp,4(%esp)
movl %ecx,8(%esp)
movl %esi,12(%esp)
movl %edx,20(%esp)
movl %edi,24(%esp)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
movl %ebp,28(%esp)
movl 156(%esp),%ebp
movl %ecx,40(%esp)
movl 152(%esp),%ecx
movl %esi,44(%esp)
xorl %esi,%esi
movl %edx,52(%esp)
movl %edi,60(%esp)
xorl %eax,%eax
xorl %edx,%edx
.L007tail_loop:
movb (%esi,%ebp,1),%al
movb (%esp,%esi,1),%dl
leal 1(%esi),%esi
xorb %dl,%al
movb %al,-1(%ecx,%esi,1)
decl %ebx
jnz .L007tail_loop
.L006done:
addl $132,%esp
.L000no_data:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ctr32,.-.L_ChaCha20_ctr32_begin
.hidden _ChaCha20_ssse3
.type _ChaCha20_ssse3,@function
.align 16
_ChaCha20_ssse3:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
.Lssse3_shortcut:
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%ecx
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $524,%esp
andl $-64,%esp
movl %ebp,512(%esp)
leal .Lssse3_data-.Lpic_point(%eax),%eax
movdqu (%ebx),%xmm3
cmpl $256,%ecx
jb .L0081x
movl %edx,516(%esp)
movl %ebx,520(%esp)
subl $256,%ecx
leal 384(%esp),%ebp
movdqu (%edx),%xmm7
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
paddd 48(%eax),%xmm0
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
psubd 64(%eax),%xmm0
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,64(%ebp)
movdqa %xmm1,80(%ebp)
movdqa %xmm2,96(%ebp)
movdqa %xmm3,112(%ebp)
movdqu 16(%edx),%xmm3
movdqa %xmm4,-64(%ebp)
movdqa %xmm5,-48(%ebp)
movdqa %xmm6,-32(%ebp)
movdqa %xmm7,-16(%ebp)
movdqa 32(%eax),%xmm7
leal 128(%esp),%ebx
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,(%ebp)
movdqa %xmm1,16(%ebp)
movdqa %xmm2,32(%ebp)
movdqa %xmm3,48(%ebp)
movdqa %xmm4,-128(%ebp)
movdqa %xmm5,-112(%ebp)
movdqa %xmm6,-96(%ebp)
movdqa %xmm7,-80(%ebp)
leal 128(%esi),%esi
leal 128(%edi),%edi
jmp .L009outer_loop
.align 16
.L009outer_loop:
movdqa -112(%ebp),%xmm1
movdqa -96(%ebp),%xmm2
movdqa -80(%ebp),%xmm3
movdqa -48(%ebp),%xmm5
movdqa -32(%ebp),%xmm6
movdqa -16(%ebp),%xmm7
movdqa %xmm1,-112(%ebx)
movdqa %xmm2,-96(%ebx)
movdqa %xmm3,-80(%ebx)
movdqa %xmm5,-48(%ebx)
movdqa %xmm6,-32(%ebx)
movdqa %xmm7,-16(%ebx)
movdqa 32(%ebp),%xmm2
movdqa 48(%ebp),%xmm3
movdqa 64(%ebp),%xmm4
movdqa 80(%ebp),%xmm5
movdqa 96(%ebp),%xmm6
movdqa 112(%ebp),%xmm7
paddd 64(%eax),%xmm4
movdqa %xmm2,32(%ebx)
movdqa %xmm3,48(%ebx)
movdqa %xmm4,64(%ebx)
movdqa %xmm5,80(%ebx)
movdqa %xmm6,96(%ebx)
movdqa %xmm7,112(%ebx)
movdqa %xmm4,64(%ebp)
movdqa -128(%ebp),%xmm0
movdqa %xmm4,%xmm6
movdqa -64(%ebp),%xmm3
movdqa (%ebp),%xmm4
movdqa 16(%ebp),%xmm5
movl $10,%edx
nop
.align 16
.L010loop:
paddd %xmm3,%xmm0
movdqa %xmm3,%xmm2
pxor %xmm0,%xmm6
pshufb (%eax),%xmm6
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -48(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 80(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,64(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-64(%ebx)
paddd %xmm7,%xmm5
movdqa 32(%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -32(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 96(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,80(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,16(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-48(%ebx)
paddd %xmm6,%xmm4
movdqa 48(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -16(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 112(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,96(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-32(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa -48(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,%xmm6
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
pshufb (%eax),%xmm6
movdqa %xmm3,-16(%ebx)
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -32(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 64(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,112(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,32(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-48(%ebx)
paddd %xmm7,%xmm5
movdqa (%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -16(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 80(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,64(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,48(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-32(%ebx)
paddd %xmm6,%xmm4
movdqa 16(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -64(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 96(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,80(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-16(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 64(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,96(%ebx)
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
por %xmm1,%xmm3
decl %edx
jnz .L010loop
movdqa %xmm3,-64(%ebx)
movdqa %xmm4,(%ebx)
movdqa %xmm5,16(%ebx)
movdqa %xmm6,64(%ebx)
movdqa %xmm7,96(%ebx)
movdqa -112(%ebx),%xmm1
movdqa -96(%ebx),%xmm2
movdqa -80(%ebx),%xmm3
paddd -128(%ebp),%xmm0
paddd -112(%ebp),%xmm1
paddd -96(%ebp),%xmm2
paddd -80(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa -64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa -48(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa -32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa -16(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd -64(%ebp),%xmm0
paddd -48(%ebp),%xmm1
paddd -32(%ebp),%xmm2
paddd -16(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa (%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 16(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 48(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd (%ebp),%xmm0
paddd 16(%ebp),%xmm1
paddd 32(%ebp),%xmm2
paddd 48(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa 64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 80(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 96(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 112(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd 64(%ebp),%xmm0
paddd 80(%ebp),%xmm1
paddd 96(%ebp),%xmm2
paddd 112(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 208(%esi),%esi
pxor %xmm0,%xmm4
pxor %xmm1,%xmm5
pxor %xmm2,%xmm6
pxor %xmm3,%xmm7
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 208(%edi),%edi
subl $256,%ecx
jnc .L009outer_loop
addl $256,%ecx
jz .L011done
movl 520(%esp),%ebx
leal -128(%esi),%esi
movl 516(%esp),%edx
leal -128(%edi),%edi
movd 64(%ebp),%xmm2
movdqu (%ebx),%xmm3
paddd 96(%eax),%xmm2
pand 112(%eax),%xmm3
por %xmm2,%xmm3
.L0081x:
movdqa 32(%eax),%xmm0
movdqu (%edx),%xmm1
movdqu 16(%edx),%xmm2
movdqa (%eax),%xmm6
movdqa 16(%eax),%xmm7
movl %ebp,48(%esp)
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
movl $10,%edx
jmp .L012loop1x
.align 16
.L013outer1x:
movdqa 80(%eax),%xmm3
movdqa (%esp),%xmm0
movdqa 16(%esp),%xmm1
movdqa 32(%esp),%xmm2
paddd 48(%esp),%xmm3
movl $10,%edx
movdqa %xmm3,48(%esp)
jmp .L012loop1x
.align 16
.L012loop1x:
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
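// .byte 102,15,56,0,222 is pshufb %xmm6,%xmm3 and ...,223 below is
// pshufb %xmm7,%xmm3: byte shuffles against the rol16/rol8 masks
// loaded from .Lssse3_data, standing in for the 16- and 8-bit
// rotates.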
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $57,%xmm1,%xmm1
pshufd $147,%xmm3,%xmm3
nop
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $147,%xmm1,%xmm1
pshufd $57,%xmm3,%xmm3
decl %edx
jnz .L012loop1x
paddd (%esp),%xmm0
paddd 16(%esp),%xmm1
paddd 32(%esp),%xmm2
paddd 48(%esp),%xmm3
cmpl $64,%ecx
jb .L014tail
movdqu (%esi),%xmm4
movdqu 16(%esi),%xmm5
pxor %xmm4,%xmm0
movdqu 32(%esi),%xmm4
pxor %xmm5,%xmm1
movdqu 48(%esi),%xmm5
pxor %xmm4,%xmm2
pxor %xmm5,%xmm3
leal 64(%esi),%esi
movdqu %xmm0,(%edi)
movdqu %xmm1,16(%edi)
movdqu %xmm2,32(%edi)
movdqu %xmm3,48(%edi)
leal 64(%edi),%edi
subl $64,%ecx
jnz .L013outer1x
jmp .L011done
.L014tail:
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
xorl %eax,%eax
xorl %edx,%edx
xorl %ebp,%ebp
.L015tail_loop:
movb (%esp,%ebp,1),%al
movb (%esi,%ebp,1),%dl
leal 1(%ebp),%ebp
xorb %dl,%al
movb %al,-1(%edi,%ebp,1)
decl %ecx
jnz .L015tail_loop
.L011done:
movl 512(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size _ChaCha20_ssse3,.-_ChaCha20_ssse3
.align 64
.Lssse3_data:
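// Layout: +0 rol16 pshufb mask, +16 rol8 pshufb mask, +32 sigma
// constants, +48 per-lane counter offsets 0..3, +64 increment of 4
// (all lanes) for the 4x path, +80 increment of 1 (lane 0) for the
// 1x path, +96/+112 value/mask pair used to splice the saved block
// counter back into the nonce when falling out of the 4x path.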
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
.long 1634760805,857760878,2036477234,1797285236
.long 0,1,2,3
.long 4,4,4,4
.long 1,0,0,0
.long 4,0,0,0
.long 0,-1,-1,-1
.align 64
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
.byte 114,103,62,0
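// (The .byte strings above decode to "ChaCha20 for x86, CRYPTOGAMS
// by <appro@openssl.org>".)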
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---- .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/ghash-neon-armv8-linux64.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#include <ring-core/arm_arch.h>
.text
.globl gcm_init_neon
.hidden gcm_init_neon
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
.size gcm_init_neon,.-gcm_init_neon
.globl gcm_gmult_neon
.hidden gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, .Lmasks // load constants
add x9, x9, :lo12:.Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b .Lgmult_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.globl gcm_ghash_neon
.hidden gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
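// Arguments follow the other GHASH entry points: x0 = Xi, x1 =
// Htable (only the single twisted H is used here), x2 = input,
// x3 = length in bytes (a multiple of 16, by the convention of the
// other GHASH implementations in this crate).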
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, .Lmasks // load constants
add x9, x9, :lo12:.Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
.Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
.Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
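// The multiplies below build one 64x64 carry-less product out of
// 8-bit pmull operations: shifted copies of the operands (A1..A4,
// B1..B4) generate partial products that are later masked and
// realigned. This is a standard NEON technique for polynomial
// multiplication when the 64-bit pmull of the crypto extension is
// unavailable.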
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
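// Editorial note on the Karatsuba recombination: with X = Xh:Xl and
// H = Hh:Hl split into 64-bit halves, the three products above are
// Xl*Hl (v0), (Xl^Xh)*(Hl^Hh) (v1) and Xh*Hh (v2). Over GF(2),
//   X*H = (Xh*Hh << 128) ^ ((v1 ^ Xl*Hl ^ Xh*Hh) << 64) ^ Xl*Hl
// which is what the post-processing below assembles: v1 ^= v0 ^ v2,
// then the middle 128 bits are folded into the v0/v2 pair.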
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
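// Editorial note: the 256-bit product is reduced modulo the GHASH
// polynomial g(x) = x^128 + x^7 + x^2 + x + 1. In GHASH's
// bit-reflected representation, multiplying by x^7 + x^2 + x becomes
// left shifts by 64-7, 64-2 and 64-1 bits, hence the shl #57, #62 and
// #63 of the first phase; the second phase applies the matching right
// shifts by 1, 2 and 7 (the latter two composed from the ushr #1 and
// #6 below).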
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne .Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.size gcm_ghash_neon,.-gcm_ghash_neon
.section .rodata
.align 4
.Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
// File: .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/aesv8-armx-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#include <ring-core/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.section .rodata
.align 5
.Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl aes_hw_set_encrypt_key
.hidden aes_hw_set_encrypt_key
.type aes_hw_set_encrypt_key,%function
.align 5
aes_hw_set_encrypt_key:
.Lenc_key:
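// Editorial note on the arguments (assuming the usual BoringSSL
// prototype, aes_hw_set_encrypt_key(const uint8_t *user_key, int bits,
// AES_KEY *key)): x0 = key bytes, w1 = key length in bits (128 or 256;
// 192-bit support was removed), x2 = output key schedule. Returns 0 on
// success, -1 for a NULL pointer and -2 for an unsupported bit length.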
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-1
cmp x0,#0
b.eq .Lenc_key_abort
cmp x2,#0
b.eq .Lenc_key_abort
mov x3,#-2
cmp w1,#128
b.lt .Lenc_key_abort
cmp w1,#256
b.gt .Lenc_key_abort
tst w1,#0x3f
b.ne .Lenc_key_abort
adrp x3,.Lrcon
add x3,x3,:lo12:.Lrcon
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt .Loop128
// 192-bit key support was removed.
b .L256
.align 4
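// Editorial note: each iteration below derives one round key. The tbl
// with v2 (the "rotate-n-splat" constant) broadcasts RotWord of the
// last key word into all four lanes; aese with an all-zero round key
// then performs AddRoundKey(0) + SubBytes + ShiftRows, and ShiftRows
// is a no-op on a state whose four columns are identical, so v6 ends
// up holding SubWord(RotWord(w)) in every lane. XORing in the rcon
// (v1) and the ext-shifted previous key words completes the standard
// AES key expansion recurrence.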
.Loop128:
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne .Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b .Ldone
// 192-bit key support was removed.
.align 4
.L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
.Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq .Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b .Loop256
.Ldone:
str w12,[x2]
mov x3,#0
.Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key
.globl aes_hw_encrypt
.hidden aes_hw_encrypt
.type aes_hw_encrypt,%function
.align 5
aes_hw_encrypt:
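// Editorial note: single-block encrypt, assuming the usual prototype
// aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key):
// x0 = input block, x1 = output block, x2 = key schedule (the round
// count lives at offset 240).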
AARCH64_VALID_CALL_TARGET
ldr w3,[x2,#240]
ld1 {v0.4s},[x2],#16
ld1 {v2.16b},[x0]
sub w3,w3,#2
ld1 {v1.4s},[x2],#16
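// Editorial note: the loop consumes two round keys per iteration,
// which is why the round count was biased by -2 above; after the loop,
// the penultimate round still uses aesmc while the final aese omits
// MixColumns, and the closing eor applies the last round key.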
.Loop_enc:
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2],#16
subs w3,w3,#2
aese v2.16b,v1.16b
aesmc v2.16b,v2.16b
ld1 {v1.4s},[x2],#16
b.gt .Loop_enc
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2]
aese v2.16b,v1.16b
eor v2.16b,v2.16b,v0.16b
st1 {v2.16b},[x1]
ret
.size aes_hw_encrypt,.-aes_hw_encrypt
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,%function
.align 5
aes_hw_ctr32_encrypt_blocks:
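// Editorial note on the arguments (assuming the usual BoringSSL
// prototype, aes_hw_ctr32_encrypt_blocks(const uint8_t *in,
// uint8_t *out, size_t blocks, const AES_KEY *key,
// const uint8_t ivec[16])): x0 = input, x1 = output, x2 = number of
// 16-byte blocks, x3 = key schedule, x4 = counter block. Only the last
// 32 bits of the counter are incremented, hence "ctr32".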
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240]
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo // x12 = 0 if fewer than two blocks remain
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The mov/orr lines
// below could write to v1.16b and v18.16b directly, but that trips these
// bugs. We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls .Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b .Loop3x_ctr32
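// Editorial note: the main loop keeps three counter blocks in flight
// (v0, v1, v18), running the shared middle rounds on all three while
// the next three counter values are spliced in (via v6) during the
// tail rounds of the current group.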
.align 4
.Loop3x_ctr32:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt .Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs .Loop3x_ctr32
adds x2,x2,#3
b.eq .Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq
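// Editorial note: x12 is the post-index for the first tail load below;
// it is 16 when two blocks remain and 0 when only one does, so the
// second ld1 rereads the same block instead of running past the end of
// the input (its result is simply not stored in the one-block case).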
.Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt .Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq .Lctr32_done
st1 {v3.16b},[x1]
.Lctr32_done:
ldr x29,[sp],#16
ret
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
// File: .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/sha512-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//			SHA256-hw	SHA256(*)	SHA512
//	Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
//	Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
//	Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
//	Denver		2.01		10.5 (+26%)	6.70 (+8%)
//	X-Gene		-		20.0 (+100%)	12.8 (+300%(***))
//	Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
//	Kryo		1.92		17.4 (+30%)	11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
#endif
.text
.hidden OPENSSL_armcap_P
.globl sha512_block_data_order
.hidden sha512_block_data_order
.type sha512_block_data_order,%function
.align 6
sha512_block_data_order:
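// Editorial note on the arguments (assuming the usual prototype,
// sha512_block_data_order(uint64_t state[8], const uint8_t *in,
// size_t num_blocks)): x0 = hash state, x1 = input, x2 = number of
// 128-byte blocks. When the CPU reports ARMV8_SHA512, control
// transfers to the crypto-extension path at .Lv8_entry below.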
AARCH64_VALID_CALL_TARGET
#ifndef __KERNEL__
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x16,:pg_hi21_nc:OPENSSL_armcap_P
#else
adrp x16,OPENSSL_armcap_P
#endif
ldr w16,[x16,:lo12:OPENSSL_armcap_P]
tst w16,#ARMV8_SHA512
b.ne .Lv8_entry
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,.LK512
add x30,x30,:lo12:.LK512
stp x0,x2,[x29,#96]
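// Editorial note: what follows is the scalar SHA-512 compression,
// fully unrolled. Each round i computes
//   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
//   d += T1;  h = T1 + Sigma0(a) + Maj(a,b,c)
// with Sigma1(e) = ror(e,14) ^ ror(e,18) ^ ror(e,41) and
// Sigma0(a) = ror(a,28) ^ ror(a,34) ^ ror(a,39). Maj is evaluated via
// the identity Maj(a,b,c) = ((a^b) & (b^c)) ^ b, which is why each
// round precomputes a^b for the next one.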
.Loop:
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
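// Editorial note: from round 16 onward the message schedule is
// extended in place,
//   W[i] = W[i-16] + sigma0(W[i-15]) + W[i-7] + sigma1(W[i-2])
// with sigma0(x) = ror(x,1) ^ ror(x,8) ^ (x >> 7) and
// sigma1(x) = ror(x,19) ^ ror(x,61) ^ (x >> 6); part of the 16-entry
// window is spilled to the 32-byte stack scratch area reserved above.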
.Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,.Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind K pointer: 81 loads of 8 bytes (80 rounds + terminator)
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne .Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size sha512_block_data_order,.-sha512_block_data_order
.section .rodata
.align 6
.type .LK512,%object
.LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.size .LK512,.-.LK512
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.type sha512_block_armv8,%function
.align 6
sha512_block_armv8:
.Lv8_entry:
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,.LK512
add x3,x3,:lo12:.LK512
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b .Loop_hw
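// Editorial note: in this crypto-extension path v0..v3 hold the eight
// hash words as four 128-bit pairs and v16..v23 hold the 16 message
// words. The .inst words are hand-encoded SHA-512 instructions (named
// in their trailing comments) for assemblers that predate the
// extension: sha512h/sha512h2 together perform two rounds on a pair of
// state halves, while sha512su0/sha512su1 advance the message
// schedule.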
.align 4
.Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,.Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
.size sha512_block_armv8,.-sha512_block_armv8
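//
// The .inst words in the loop above are raw encodings of the ARMv8.2
// SHA-512 instructions (sha512h, sha512h2, sha512su0, sha512su1), emitted
// as literal words so that assemblers lacking the SHA-512 extension
// mnemonics can still build this file; sha512su0/sha512su1 advance the
// message schedule, sha512h/sha512h2 the state. For reference, a scalar
// sketch of one SHA-512 round (each sha512h/sha512h2 pair advances two
// such rounds) — hypothetical Rust, not part of this file; k is the
// constant from the table at x3, w the message-schedule word:
//
//     fn sha512_round(s: &mut [u64; 8], k: u64, w: u64) {
//         let [a, b, c, d, e, f, g, h] = *s;
//         let s1 = e.rotate_right(14) ^ e.rotate_right(18) ^ e.rotate_right(41);
//         let ch = (e & f) ^ (!e & g);
//         let t1 = h.wrapping_add(s1).wrapping_add(ch).wrapping_add(k).wrapping_add(w);
//         let s0 = a.rotate_right(28) ^ a.rotate_right(34) ^ a.rotate_right(39);
//         let maj = (a & b) ^ (a & c) ^ (b & c);
//         *s = [t1.wrapping_add(s0.wrapping_add(maj)), a, b, c, d.wrapping_add(t1), e, f, g];
//     }
//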
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/chacha20_poly1305_x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.extern OPENSSL_ia32cap_P
.hidden OPENSSL_ia32cap_P
chacha20_poly1305_constants:
.section .rodata
.align 64
.Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.Lrol16:
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.Lavx2_init:
.long 0,0,0,0
.Lsse_inc:
.long 1,0,0,0
.Lavx2_inc:
.long 2,0,0,0,2,0,0,0
.Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
.align 16
.Land_masks:
.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
.text
.type poly_hash_ad_internal,@function
.align 64
poly_hash_ad_internal:
.cfi_startproc
.cfi_def_cfa rsp, 8
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
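// A 13-byte AD is the fixed TLS AEAD additional-data length, so it gets a
// dedicated path below: the 13 bytes are loaded with two overlapping
// 8-byte reads (the shrq discards the 3 duplicated bytes), zero-padded to
// a full block as RFC 8439 requires, given the 2^128 pad bit in %r12, and
// run through a single Poly1305 multiply-reduce.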
cmpq $13,%r8
jne .Lhash_ad_loop
.Lpoly_fast_tls_ad:
movq (%rcx),%r10
movq 5(%rcx),%r11
shrq $24,%r11
movq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
ret
.Lhash_ad_loop:
cmpq $16,%r8
jb .Lhash_ad_tail
addq 0+0(%rcx),%r10
adcq 8+0(%rcx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rcx),%rcx
subq $16,%r8
jmp .Lhash_ad_loop
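// 1..15 trailing AD bytes: walk backwards from the end, shifting each byte
// into the %r14:%r13 pair so the partial block ends up little-endian and
// implicitly zero-padded before the usual multiply-reduce.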
.Lhash_ad_tail:
cmpq $0,%r8
je .Lhash_ad_done
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
addq %r8,%rcx
.Lhash_ad_tail_loop:
shldq $8,%r13,%r14
shlq $8,%r13
movzbq -1(%rcx),%r15
xorq %r15,%r13
decq %rcx
decq %r8
jne .Lhash_ad_tail_loop
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lhash_ad_done:
ret
.cfi_endproc
.size poly_hash_ad_internal, .-poly_hash_ad_internal
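// Every mulq/shrdq block in this file performs one Poly1305 update:
// h += block (plus the 2^128 pad bit carried in %r12), h *= r (the clamped
// key halves at 0+0(%rbp) and 8+0(%rbp)), then a partial reduction that
// folds the bits above 2^130 back in multiplied by 5, since
// 2^130 = 5 (mod 2^130 - 5). A minimal wide-integer sketch of the same
// update (hypothetical Rust, assuming the num-bigint crate; not part of
// this file):
//
//     use num_bigint::BigUint;
//
//     fn poly1305_block(h: &mut BigUint, r: &BigUint, block: &[u8; 16]) {
//         let p = (BigUint::from(1u8) << 130u32) - 5u8;       // 2^130 - 5
//         let m = BigUint::from_bytes_le(block)
//             + (BigUint::from(1u8) << 128u32);               // pad bit
//         *h = (h.clone() + m) * r % p;
//     }
//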
.globl chacha20_poly1305_open
.hidden chacha20_poly1305_open
.type chacha20_poly1305_open,@function
.align 64
chacha20_poly1305_open:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
pushq %r9
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
subq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset 288 + 32
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
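// CPU dispatch: bits 5 and 8 of OPENSSL_ia32cap_P word 2 (CPUID.7:EBX) are
// AVX2 and BMI2; when both are present the AVX2 path is taken, otherwise
// fall through to the SSE implementation below.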
movl OPENSSL_ia32cap_P+8(%rip),%eax
andl $288,%eax
xorl $288,%eax
jz chacha20_poly1305_open_avx2
cmpq $128,%rbx
jbe .Lopen_sse_128
movdqa .Lchacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm7
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movq $10,%r10
.Lopen_sse_init_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
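// The three .byte runs above encode palignr $4,%xmm4,%xmm4 /
// palignr $8,%xmm8,%xmm8 / palignr $12,%xmm12,%xmm12, rotating the b, c
// and d rows of the state onto diagonals; the mirrored $12/$8/$4 group
// after the second half-round rotates them back.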
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jne .Lopen_sse_init_rounds
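// The loop above runs 10 ChaCha20 double-rounds on one block: pshufb
// handles the 16- and 8-bit rotations, the pslld/psrld pairs the 12- and
// 7-bit ones. A scalar reference for the quarter-round being vectorized
// (hypothetical Rust helper, not part of this file):
//
//     fn quarter_round(s: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize) {
//         s[a] = s[a].wrapping_add(s[b]); s[d] = (s[d] ^ s[a]).rotate_left(16);
//         s[c] = s[c].wrapping_add(s[d]); s[b] = (s[b] ^ s[c]).rotate_left(12);
//         s[a] = s[a].wrapping_add(s[b]); s[d] = (s[d] ^ s[a]).rotate_left(8);
//         s[c] = s[c].wrapping_add(s[d]); s[b] = (s[b] ^ s[c]).rotate_left(7);
//     }
//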
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
pand .Lclamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
.Lopen_sse_main_loop:
cmpq $256,%rbx
jb .Lopen_sse_tail
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd .Lsse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $4,%rcx
movq %rsi,%r8
.Lopen_sse_main_loop_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
leaq 16(%r8),%r8
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %rcx
jge .Lopen_sse_main_loop_rounds
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
cmpq $-6,%rcx
jg .Lopen_sse_main_loop_rounds
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor 0+80(%rbp),%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp .Lopen_sse_main_loop
.Lopen_sse_tail:
testq %rbx,%rbx
jz .Lopen_sse_finalize
cmpq $192,%rbx
ja .Lopen_sse_tail_256
cmpq $128,%rbx
ja .Lopen_sse_tail_192
cmpq $64,%rbx
ja .Lopen_sse_tail_128
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
cmpq $16,%rcx
jb .Lopen_sse_tail_64_rounds
.Lopen_sse_tail_64_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
.Lopen_sse_tail_64_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
cmpq $16,%rcx
jae .Lopen_sse_tail_64_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_sse_tail_64_rounds
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp .Lopen_sse_tail_64_dec_loop
.Lopen_sse_tail_128:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movq %rbx,%rcx
andq $-16,%rcx
xorq %r8,%r8
.Lopen_sse_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_sse_tail_128_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
cmpq %rcx,%r8
jb .Lopen_sse_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_sse_tail_128_rounds
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
subq $64,%rbx
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
jmp .Lopen_sse_tail_64_dec_loop
.Lopen_sse_tail_192:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movq %rbx,%rcx
movq $160,%r8
cmpq $160,%rcx
cmovgq %r8,%rcx
andq $-16,%rcx
xorq %r8,%r8
.Lopen_sse_tail_192_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_sse_tail_192_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
cmpq %rcx,%r8
jb .Lopen_sse_tail_192_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_sse_tail_192_rounds
cmpq $176,%rbx
jb .Lopen_sse_tail_192_finish
addq 0+160(%rsi),%r10
adcq 8+160(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
cmpq $192,%rbx
jb .Lopen_sse_tail_192_finish
addq 0+176(%rsi),%r10
adcq 8+176(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_sse_tail_192_finish:
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
subq $128,%rbx
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
jmp .Lopen_sse_tail_64_dec_loop
.Lopen_sse_tail_256:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd .Lsse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
xorq %r8,%r8
.Lopen_sse_tail_256_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
movdqa 0+80(%rbp),%xmm11
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
movdqa 0+80(%rbp),%xmm9
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
movdqa 0+80(%rbp),%xmm11
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
movdqa 0+80(%rbp),%xmm9
addq $16,%r8
cmpq $160,%r8
jb .Lopen_sse_tail_256_rounds_and_x1hash
movq %rbx,%rcx
andq $-16,%rcx
.Lopen_sse_tail_256_hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%r8
cmpq %rcx,%r8
jb .Lopen_sse_tail_256_hash
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqa 0+80(%rbp),%xmm12
subq $192,%rbx
leaq 192(%rsi),%rsi
leaq 192(%rdi),%rdi
.Lopen_sse_tail_64_dec_loop:
cmpq $16,%rbx
jb .Lopen_sse_tail_16_init
subq $16,%rbx
movdqu (%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
jmp .Lopen_sse_tail_64_dec_loop
.Lopen_sse_tail_16_init:
movdqa %xmm0,%xmm1
.Lopen_sse_tail_16:
testq %rbx,%rbx
jz .Lopen_sse_finalize
pxor %xmm3,%xmm3
leaq -1(%rsi,%rbx,1),%rsi
movq %rbx,%r8
.Lopen_sse_tail_16_compose:
pslldq $1,%xmm3
pinsrb $0,(%rsi),%xmm3
subq $1,%rsi
subq $1,%r8
jnz .Lopen_sse_tail_16_compose
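// The .byte sequence below encodes movq %xmm3,%r13; together with pextrq
// it moves the zero-padded final ciphertext block into %r13:%r14 so it can
// be absorbed by Poly1305 (opening authenticates the ciphertext) before
// the keystream in %xmm1 is XORed in.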
.byte 102,73,15,126,221
pextrq $1,%xmm3,%r14
pxor %xmm1,%xmm3
.Lopen_sse_tail_16_extract:
pextrb $0,%xmm3,(%rdi)
psrldq $1,%xmm3
addq $1,%rdi
subq $1,%rbx
jne .Lopen_sse_tail_16_extract
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_sse_finalize:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
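// Final Poly1305 reduction: compute h - (2^130 - 5) by subtracting the
// two's-complement constants -5, -1 (with borrow) and 3 across the three
// limbs. A borrow out means h was already canonical and the cmovc group
// restores the saved copy; the tag is then h plus the pad s kept at
// 0+16(%rbp), truncated to 128 bits.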
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
.cfi_remember_state
addq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset -(288 + 32)
popq %r9
.cfi_adjust_cfa_offset -8
.cfi_restore %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
ret
.Lopen_sse_128:
.cfi_restore_state
movdqu .Lchacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm13,%xmm15
movq $10,%r10
.Lopen_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz .Lopen_sse_128_rounds
paddd .Lchacha20_consts(%rip),%xmm0
paddd .Lchacha20_consts(%rip),%xmm1
paddd .Lchacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm9
paddd %xmm11,%xmm10
paddd %xmm15,%xmm13
paddd .Lsse_inc(%rip),%xmm15
paddd %xmm15,%xmm14
pand .Lclamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
.Lopen_sse_128_xor_hash:
cmpq $16,%rbx
jb .Lopen_sse_tail_16
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm1
movdqu %xmm1,0(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
movdqa %xmm2,%xmm13
movdqa %xmm6,%xmm2
movdqa %xmm10,%xmm6
movdqa %xmm14,%xmm10
jmp .Lopen_sse_128_xor_hash
.size chacha20_poly1305_open, .-chacha20_poly1305_open
.cfi_endproc
.globl chacha20_poly1305_seal
.hidden chacha20_poly1305_seal
.type chacha20_poly1305_seal,@function
.align 64
chacha20_poly1305_seal:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
pushq %r9
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
subq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset 288 + 32
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
movl OPENSSL_ia32cap_P+8(%rip),%eax
andl $288,%eax
xorl $288,%eax
jz chacha20_poly1305_seal_avx2
cmpq $128,%rbx
jbe .Lseal_sse_128
movdqa .Lchacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,%xmm14
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $10,%r10
.Lseal_sse_init_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jnz .Lseal_sse_init_rounds
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
pand .Lclamp(%rip),%xmm3
movdqa %xmm3,0+0(%rbp)
movdqa %xmm7,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
cmpq $192,%rbx
ja .Lseal_sse_main_init
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
jmp .Lseal_sse_128_tail_hash
.Lseal_sse_main_init:
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 128(%rdi)
movdqu %xmm4,16 + 128(%rdi)
movdqu %xmm8,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
movq $2,%rcx
movq $8,%r8
cmpq $64,%rbx
jbe .Lseal_sse_tail_64
cmpq $128,%rbx
jbe .Lseal_sse_tail_128
cmpq $192,%rbx
jbe .Lseal_sse_tail_192
.Lseal_sse_main_loop:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd .Lsse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
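// Each pass below interleaves one Poly1305 block (read back from the
// ciphertext just written, via %rdi) with the ChaCha20 double-round over
// four blocks, so the scalar multiplies overlap the vector dependency
// chains.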
.align 32
.Lseal_sse_main_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
leaq 16(%rdi),%rdi
decq %r8
jge .Lseal_sse_main_rounds
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_main_rounds
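# Add the saved initial state back into the working state, turning it into
# 256 bytes of ChaCha20 keystream (four 64-byte blocks).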
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
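# XOR the first 192 bytes of input with keystream. %xmm14 is spilled to
# 80(%rbp) so it can double as a scratch register for the first 64 bytes.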
movdqa %xmm14,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm14
pxor %xmm3,%xmm14
movdqu %xmm14,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm14
pxor %xmm7,%xmm14
movdqu %xmm14,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm14
pxor %xmm11,%xmm14
movdqu %xmm14,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm14
pxor %xmm15,%xmm14
movdqu %xmm14,48 + 0(%rdi)
movdqa 0+80(%rbp),%xmm14
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
cmpq $256,%rbx
ja .Lseal_sse_main_loop_xor
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
jmp .Lseal_sse_128_tail_hash
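# More than 256 bytes remain: consume the fourth 64-byte keystream block too.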
.Lseal_sse_main_loop_xor:
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
subq $256,%rbx
movq $6,%rcx
movq $4,%r8
cmpq $192,%rbx
jg .Lseal_sse_main_loop
movq %rbx,%rcx
testq %rbx,%rbx
je .Lseal_sse_128_tail_hash
movq $6,%rcx
cmpq $128,%rbx
ja .Lseal_sse_tail_192
cmpq $64,%rbx
ja .Lseal_sse_tail_128
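# Tail of at most 64 bytes: one ChaCha20 block, with Poly1305 interleaved
# over ciphertext already written at %rdi.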
.Lseal_sse_tail_64:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
.Lseal_sse_tail_64_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_sse_tail_64_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_tail_64_rounds_and_x2hash
decq %r8
jge .Lseal_sse_tail_64_rounds_and_x1hash
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp .Lseal_sse_128_tail_xor
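# Tail of 65..128 bytes: two parallel ChaCha20 blocks.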
.Lseal_sse_tail_128:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
.Lseal_sse_tail_128_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_sse_tail_128_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_tail_128_rounds_and_x2hash
decq %r8
jge .Lseal_sse_tail_128_rounds_and_x1hash
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movq $64,%rcx
subq $64,%rbx
leaq 64(%rsi),%rsi
jmp .Lseal_sse_128_tail_hash
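# Tail of 129..192 bytes: three parallel ChaCha20 blocks.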
.Lseal_sse_tail_192:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
.Lseal_sse_tail_192_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_sse_tail_192_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_tail_192_rounds_and_x2hash
decq %r8
jge .Lseal_sse_tail_192_rounds_and_x1hash
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
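# Hash any remaining full 16-byte ciphertext blocks already written to %rdi.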
.Lseal_sse_128_tail_hash:
cmpq $16,%rcx
jb .Lseal_sse_128_tail_xor
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
leaq 16(%rdi),%rdi
jmp .Lseal_sse_128_tail_hash
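# Encrypt and hash 16 bytes at a time; after each block the spare keystream
# registers rotate down (%xmm4 -> %xmm0, %xmm8 -> %xmm4, and so on).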
.Lseal_sse_128_tail_xor:
cmpq $16,%rbx
jb .Lseal_sse_tail_16
subq $16,%rbx
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,0(%rdi)
addq 0(%rdi),%r10
adcq 8(%rdi),%r11
adcq $1,%r12
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
movdqa %xmm1,%xmm12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
jmp .Lseal_sse_128_tail_xor
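# Final 1..15 bytes: gather them back-to-front into %xmm15, XOR with the
# keystream in %xmm0, and write the result out a byte at a time.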
.Lseal_sse_tail_16:
testq %rbx,%rbx
jz .Lprocess_blocks_of_extra_in
movq %rbx,%r8
movq %rbx,%rcx
leaq -1(%rsi,%rbx,1),%rsi
pxor %xmm15,%xmm15
.Lseal_sse_tail_16_compose:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
decq %rcx
jne .Lseal_sse_tail_16_compose
pxor %xmm0,%xmm15
movq %rbx,%rcx
movdqu %xmm15,%xmm0
.Lseal_sse_tail_16_extract:
pextrb $0,%xmm0,(%rdi)
psrldq $1,%xmm0
addq $1,%rdi
subq $1,%rcx
jnz .Lseal_sse_tail_16_extract
movq 288 + 0 + 32(%rsp),%r9
movq 56(%r9),%r14
movq 48(%r9),%r13
testq %r14,%r14
jz .Lprocess_partial_block
movq $16,%r15
subq %rbx,%r15
cmpq %r15,%r14
jge .Lload_extra_in
movq %r14,%r15
.Lload_extra_in:
leaq -1(%r13,%r15,1),%rsi
addq %r15,%r13
subq %r15,%r14
movq %r13,48(%r9)
movq %r14,56(%r9)
addq %r15,%r8
pxor %xmm11,%xmm11
.Lload_extra_load_loop:
pslldq $1,%xmm11
pinsrb $0,(%rsi),%xmm11
leaq -1(%rsi),%rsi
subq $1,%r15
jnz .Lload_extra_load_loop
movq %rbx,%r15
.Lload_extra_shift_loop:
pslldq $1,%xmm11
subq $1,%r15
jnz .Lload_extra_shift_loop
leaq .Land_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
por %xmm11,%xmm15
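# Raw encoding of movq %xmm15,%r13: together with the pextrq below, this
# moves the masked block into GPRs for the Poly1305 update.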
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
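# Hash the caller-supplied extra input, if any; its pointer and remaining
# length live at 48(%r9) and 56(%r9) in the data block passed on the stack.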
.Lprocess_blocks_of_extra_in:
movq 288+32+0(%rsp),%r9
movq 48(%r9),%rsi
movq 56(%r9),%r8
movq %r8,%rcx
shrq $4,%r8
.Lprocess_extra_hash_loop:
jz .Lprocess_extra_in_trailer
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rsi),%rsi
subq $1,%r8
jmp .Lprocess_extra_hash_loop
.Lprocess_extra_in_trailer:
andq $15,%rcx
movq %rcx,%rbx
jz .Ldo_length_block
leaq -1(%rsi,%rcx,1),%rsi
.Lprocess_extra_in_trailer_load:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
subq $1,%rcx
jnz .Lprocess_extra_in_trailer_load
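# Mask the assembled partial block in %xmm15 down to its %rbx valid bytes
# and feed it to Poly1305.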
.Lprocess_partial_block:
leaq .Land_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
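# Hash the final length block (AD length, ciphertext length) kept at
# 32(%rbp).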
.Ldo_length_block:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
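# Final reduction: compute acc - p for p = 2^130 - 5 (limbs -5, -1, 3); if
# the subtraction borrows, acc was already < p, so keep the original limbs.
# Adding the key's s half then yields the 128-bit tag in %r10:%r11.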
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
.cfi_remember_state
addq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset -(288 + 32)
popq %r9
.cfi_adjust_cfa_offset -8
.cfi_restore %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
ret
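# Short-input path (total <= 128 bytes): generate three keystream blocks
# directly from the key material at %r9; block 0, clamped, supplies the
# Poly1305 key r||s.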
.Lseal_sse_128:
.cfi_restore_state
movdqu .Lchacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm14
movdqa %xmm14,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
movq $10,%r10
.Lseal_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz .Lseal_sse_128_rounds
paddd .Lchacha20_consts(%rip),%xmm0
paddd .Lchacha20_consts(%rip),%xmm1
paddd .Lchacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm8
paddd %xmm11,%xmm9
paddd %xmm15,%xmm12
paddd .Lsse_inc(%rip),%xmm15
paddd %xmm15,%xmm13
pand .Lclamp(%rip),%xmm2
movdqa %xmm2,0+0(%rbp)
movdqa %xmm6,0+16(%rbp)
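# Generator-emitted no-op move; presumably the AD length is already in %r8,
# where poly_hash_ad_internal expects it.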
movq %r8,%r8
call poly_hash_ad_internal
jmp .Lseal_sse_128_tail_xor
.size chacha20_poly1305_seal, .-chacha20_poly1305_seal
.cfi_endproc
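# AVX2 implementation of open. Reached by a jump from the common open entry
# point after its prologue, so the CFI directives below only restate the
# frame that entry point already established.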
.type chacha20_poly1305_open_avx2,@function
.align 64
chacha20_poly1305_open_avx2:
.cfi_startproc
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
.cfi_adjust_cfa_offset 288 + 32
vzeroupper
vmovdqa .Lchacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd .Lavx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe .Lopen_avx2_192
cmpq $320,%rbx
jbe .Lopen_avx2_320
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%r10
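# First double-block: ten double-rounds over a single ymm state (two
# ChaCha20 blocks); the clamped low 32 bytes of block 0 become the Poly1305
# key r||s.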
.Lopen_avx2_init_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
decq %r10
jne .Lopen_avx2_init_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
.Lopen_avx2_init_hash:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%rcx
cmpq $64,%rcx
jne .Lopen_avx2_init_hash
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vmovdqu %ymm0,0(%rdi)
vmovdqu %ymm4,32(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
subq $64,%rbx
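# Main loop: 512 bytes per iteration (eight ChaCha20 blocks across four ymm
# state sets), with Poly1305 hashing of the ciphertext interleaved into the
# rounds. %rcx counts hashed bytes: the round loop covers 48 per pass up to
# 480, and the last 32 bytes are hashed during the finalization below.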
.Lopen_avx2_main_loop:
cmpq $512,%rbx
jb .Lopen_avx2_main_loop_done
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
.Lopen_avx2_main_loop_rounds:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rsi,%rcx,1),%r10
adcq 8+16(%rsi,%rcx,1),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rsi,%rcx,1),%r10
adcq 8+32(%rsi,%rcx,1),%r11
adcq $1,%r12
leaq 48(%rcx),%rcx
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
cmpq $60*8,%rcx
jne .Lopen_avx2_main_loop_rounds
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+60*8(%rsi),%r10
adcq 8+60*8(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
addq 0+60*8+16(%rsi),%r10
adcq 8+60*8+16(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
leaq 512(%rdi),%rdi
subq $512,%rbx
jmp .Lopen_avx2_main_loop
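# Fewer than 512 bytes remain: dispatch to a tail kernel sized to the
# remainder.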
.Lopen_avx2_main_loop_done:
testq %rbx,%rbx
vzeroupper
je .Lopen_sse_finalize
cmpq $384,%rbx
ja .Lopen_avx2_tail_512
cmpq $256,%rbx
ja .Lopen_avx2_tail_384
cmpq $128,%rbx
ja .Lopen_avx2_tail_256
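# Tail of at most 128 bytes: one ymm state (two blocks); full 16-byte
# ciphertext blocks are hashed while the rounds run.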
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
andq $-16,%rcx
testq %rcx,%rcx
je .Lopen_avx2_tail_128_rounds
.Lopen_avx2_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_avx2_tail_128_rounds:
addq $16,%r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb .Lopen_avx2_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_avx2_tail_128_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp .Lopen_avx2_tail_128_xor
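# Tail of 129..256 bytes: two ymm states (four blocks).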
.Lopen_avx2_tail_256:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $128,%rcx
shrq $4,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
.Lopen_avx2_tail_256_rounds_and_x1hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
.Lopen_avx2_tail_256_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
incq %r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
cmpq %rcx,%r8
jb .Lopen_avx2_tail_256_rounds_and_x1hash
cmpq $10,%r8
jne .Lopen_avx2_tail_256_rounds
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
.Lopen_avx2_tail_256_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg .Lopen_avx2_tail_256_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp .Lopen_avx2_tail_256_hash
.Lopen_avx2_tail_256_done:
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
subq $128,%rbx
jmp .Lopen_avx2_tail_128_xor
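# Tail of 257..384 bytes: three ymm states (six blocks).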
.Lopen_avx2_tail_384:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $256,%rcx
shrq $4,%rcx
addq $6,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
.Lopen_avx2_tail_384_rounds_and_x2hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
.Lopen_avx2_tail_384_rounds_and_x1hash:
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
incq %r8
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb .Lopen_avx2_tail_384_rounds_and_x2hash
cmpq $10,%r8
jne .Lopen_avx2_tail_384_rounds_and_x1hash
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
.Lopen_avx2_384_tail_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg .Lopen_avx2_384_tail_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp .Lopen_avx2_384_tail_hash
.Lopen_avx2_384_tail_done:
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp .Lopen_avx2_tail_128_xor
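# Tail of 385..511 bytes: run the full four-state kernel one last time.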
.Lopen_avx2_tail_512:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
movq %rsi,%r8
.Lopen_avx2_tail_512_rounds_and_x2hash:
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
.Lopen_avx2_tail_512_rounds_and_x1hash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+16(%r8),%r10
adcq 8+16(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%r8),%r8
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
incq %rcx
cmpq $4,%rcx
jl .Lopen_avx2_tail_512_rounds_and_x2hash
cmpq $10,%rcx
jne .Lopen_avx2_tail_512_rounds_and_x1hash
movq %rbx,%rcx
subq $384,%rcx
andq $-16,%rcx
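// Hash any remaining 16-byte ciphertext blocks beyond the 384 bytes already
// folded in during the rounds above; %rcx holds the byte count left to hash.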
.Lopen_avx2_tail_512_hash:
testq %rcx,%rcx
je .Lopen_avx2_tail_512_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
subq $16,%rcx
jmp .Lopen_avx2_tail_512_hash
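// All rounds done: add the saved constants, key and counter words back into
// the working state, transpose the 128-bit lanes into output order, and XOR
// the first 384 bytes; the last at-most-128 bytes fall through to the
// 32-byte loop below.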
.Lopen_avx2_tail_512_done:
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 384(%rsi),%rsi
leaq 384(%rdi),%rdi
subq $384,%rbx
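// XOR the remaining keystream against the input 32 bytes at a time,
// rotating %ymm4/%ymm8/%ymm12 down into %ymm0 as each register is consumed.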
.Lopen_avx2_tail_128_xor:
cmpq $32,%rbx
jb .Lopen_avx2_tail_32_xor
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
jmp .Lopen_avx2_tail_128_xor
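// Fewer than 32 bytes left: handle one 16-byte chunk with XMM ops and leave
// any final partial block to the shared SSE tail code.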
.Lopen_avx2_tail_32_xor:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb .Lopen_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm1
vmovdqu %xmm1,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vperm2i128 $0x11,%ymm0,%ymm0,%ymm0
vmovdqa %xmm0,%xmm1
.Lopen_avx2_exit:
vzeroupper
jmp .Lopen_sse_tail_16
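//
// Short-input open path, at most 192 bytes: two interleaved 2-block ChaCha20
// states give four blocks of keystream; the first block is clamped to form
// the Poly1305 key, leaving 192 bytes for decryption.
//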
.Lopen_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
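// Ten ChaCha20 double-rounds. AVX2 has no dword-rotate instruction, so the
// 16- and 8-bit rotates are byte shuffles (vpshufb with .Lrol16/.Lrol8) and
// the 12- and 7-bit rotates are shift/shift/xor pairs; vpalignr rotates the
// diagonals between the column and diagonal half-rounds.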
.Lopen_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne .Lopen_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
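// Common exit for the short open paths: hash the additional data, then hash
// and decrypt 32 bytes per iteration from the keystream registers prepared
// above, rotating them down through %ymm0.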
.Lopen_avx2_short:
// No-op left by the code generator: the AD length is already in %r8.
movq %r8,%r8
call poly_hash_ad_internal
.Lopen_avx2_short_hash_and_xor_loop:
cmpq $32,%rbx
jb .Lopen_avx2_short_tail_32
subq $32,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rsi),%r10
adcq 8+16(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp .Lopen_avx2_short_hash_and_xor_loop
.Lopen_avx2_short_tail_32:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb .Lopen_avx2_short_tail_32_exit
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm1
.Lopen_avx2_short_tail_32_exit:
vzeroupper
jmp .Lopen_sse_tail_16
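//
// Short-input open path, at most 320 bytes: three interleaved 2-block states
// give six blocks of keystream; as in the 192-byte case, block 0 supplies
// the Poly1305 key.
//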
.Lopen_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vpaddd .Lavx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
.Lopen_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne .Lopen_avx2_320_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp .Lopen_avx2_short
.size chacha20_poly1305_open_avx2, .-chacha20_poly1305_open_avx2
.cfi_endproc
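//
// chacha20_poly1305_seal_avx2
//
// AVX2 seal path. This label appears to be reached by a jump from the
// generic seal entry point with the stack frame already established, which
// is why the CFI directives below have no matching push instructions. As
// used below, %rdi/%rsi are dst/src, %rbx is the plaintext length, %r8 the
// AD length, and %r9 points at the key/nonce block.
//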
.type chacha20_poly1305_seal_avx2,@function
.align 64
chacha20_poly1305_seal_avx2:
.cfi_startproc
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
.cfi_adjust_cfa_offset 288 + 32
vzeroupper
vmovdqa .Lchacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd .Lavx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe .Lseal_avx2_192
cmpq $320,%rbx
jbe .Lseal_avx2_320
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm4,%ymm7
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vmovdqa %ymm8,%ymm11
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,%ymm15
vpaddd .Lavx2_inc(%rip),%ymm15,%ymm14
vpaddd .Lavx2_inc(%rip),%ymm14,%ymm13
vpaddd .Lavx2_inc(%rip),%ymm13,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm15,0+256(%rbp)
movq $10,%r10
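// First pass: ten double-rounds over four 2-block states. Block 0 of the
// output is clamped to form the Poly1305 key; the rest of the keystream
// encrypts the first 320 bytes before the main loop takes over.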
.Lseal_avx2_init_rounds:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %r10
jnz .Lseal_avx2_init_rounds
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vperm2i128 $0x02,%ymm3,%ymm7,%ymm15
vperm2i128 $0x13,%ymm3,%ymm7,%ymm3
vpand .Lclamp(%rip),%ymm15,%ymm15
vmovdqa %ymm15,0+0(%rbp)
// No-op left by the code generator: the AD length is already in %r8.
movq %r8,%r8
call poly_hash_ad_internal
vpxor 0(%rsi),%ymm3,%ymm3
vpxor 32(%rsi),%ymm11,%ymm11
vmovdqu %ymm3,0(%rdi)
vmovdqu %ymm11,32(%rdi)
vperm2i128 $0x02,%ymm2,%ymm6,%ymm15
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+64(%rsi),%ymm15,%ymm15
vpxor 32+64(%rsi),%ymm2,%ymm2
vpxor 64+64(%rsi),%ymm6,%ymm6
vpxor 96+64(%rsi),%ymm10,%ymm10
vmovdqu %ymm15,0+64(%rdi)
vmovdqu %ymm2,32+64(%rdi)
vmovdqu %ymm6,64+64(%rdi)
vmovdqu %ymm10,96+64(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm15
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+192(%rsi),%ymm15,%ymm15
vpxor 32+192(%rsi),%ymm1,%ymm1
vpxor 64+192(%rsi),%ymm5,%ymm5
vpxor 96+192(%rsi),%ymm9,%ymm9
vmovdqu %ymm15,0+192(%rdi)
vmovdqu %ymm1,32+192(%rdi)
vmovdqu %ymm5,64+192(%rdi)
vmovdqu %ymm9,96+192(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm15
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm15,%ymm8
leaq 320(%rsi),%rsi
subq $320,%rbx
movq $320,%rcx
cmpq $128,%rbx
jbe .Lseal_avx2_short_hash_remainder
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vpxor 64(%rsi),%ymm8,%ymm8
vpxor 96(%rsi),%ymm12,%ymm12
vmovdqu %ymm0,320(%rdi)
vmovdqu %ymm4,352(%rdi)
vmovdqu %ymm8,384(%rdi)
vmovdqu %ymm12,416(%rdi)
leaq 128(%rsi),%rsi
subq $128,%rbx
movq $8,%rcx
movq $2,%r8
cmpq $128,%rbx
jbe .Lseal_avx2_tail_128
cmpq $256,%rbx
jbe .Lseal_avx2_tail_256
cmpq $384,%rbx
jbe .Lseal_avx2_tail_384
cmpq $512,%rbx
jbe .Lseal_avx2_tail_512
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
subq $16,%rdi
movq $9,%rcx
jmp .Lseal_avx2_main_loop_rounds_entry
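// Main seal loop, 512 bytes per iteration. Poly1305 hashes the ciphertext
// produced earlier while the next chunk of keystream is being computed
// (hence the 16-byte %rdi rewind above).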
.align 32
.Lseal_avx2_main_loop:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%rcx
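// One double-round across all four states per iteration, interleaved with
// three 16-byte Poly1305 blocks (at 0, 16 and 32 off %rdi) so the scalar
// multiplies overlap the vector work.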
.align 32
.Lseal_avx2_main_loop_rounds:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lseal_avx2_main_loop_rounds_entry:
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rdi),%r10
adcq 8+32(%rdi),%r11
adcq $1,%r12
leaq 48(%rdi),%rdi
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %rcx
jne .Lseal_avx2_main_loop_rounds
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
subq $512,%rbx
cmpq $512,%rbx
jg .Lseal_avx2_main_loop
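// Hash the final 32 bytes of ciphertext from the last full chunk, then set
// up %rcx/%r8 as round and extra-hash counters and dispatch on the amount
// of plaintext left.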
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
movq $10,%rcx
xorq %r8,%r8
cmpq $384,%rbx
ja .Lseal_avx2_tail_512
cmpq $256,%rbx
ja .Lseal_avx2_tail_384
cmpq $128,%rbx
ja .Lseal_avx2_tail_256
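//
// Seal tail, at most 128 bytes left: one 2-block state. Each %rcx iteration
// hashes three 16-byte blocks of earlier ciphertext (one before the inner
// label, two inside it); %r8 extra passes through the two-block body finish
// the Poly1305 input after the rounds are done.
//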
.Lseal_avx2_tail_128:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
.Lseal_avx2_tail_128_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_128_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_128_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_128_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp .Lseal_avx2_short_loop
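// Seal tail, at most 256 bytes left: two 2-block states, hashed and rounded
// with the same three-blocks-per-iteration pattern as the 128-byte tail
// (this variant uses the plain mulq Poly1305 sequence).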
.Lseal_avx2_tail_256:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
.Lseal_avx2_tail_256_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_256_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_256_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_256_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $128,%rcx
leaq 128(%rsi),%rsi
subq $128,%rbx
jmp .Lseal_avx2_short_hash_remainder
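// Seal tail, at most 384 bytes left: three 2-block states.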
.Lseal_avx2_tail_384:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
.Lseal_avx2_tail_384_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_384_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_384_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_384_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $256,%rcx
leaq 256(%rsi),%rsi
subq $256,%rbx
jmp .Lseal_avx2_short_hash_remainder
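// Seal tail, at most 512 bytes left: four 2-block states, with the mulx
// Poly1305 sequence threaded through the vector rounds as in the main loop.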
.Lseal_avx2_tail_512:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
.Lseal_avx2_tail_512_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_512_rounds_and_2xhash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
addq %rax,%r15
adcq %rdx,%r9
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_512_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_512_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $384,%rcx
leaq 384(%rsi),%rsi
subq $384,%rbx
jmp .Lseal_avx2_short_hash_remainder
.Lseal_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vpaddd .Lavx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
.Lseal_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne .Lseal_avx2_320_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp .Lseal_avx2_short
.Lseal_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
.Lseal_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne .Lseal_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
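// In both short paths above, the first 32 bytes of block-0 keystream
// are masked with .Lclamp and saved at 0(%rbp): they become the
// one-time Poly1305 key (r || s), with r clamped as RFC 8439 requires,
// i.e. bytewise:
//
//   r[3] &= 15;  r[7] &= 15;  r[11] &= 15;  r[15] &= 15;
//   r[4] &= 252; r[8] &= 252; r[12] &= 252;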
.Lseal_avx2_short:
movq %r8,%r8
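// (movq %r8,%r8 above is a harmless no-op left in by the code generator)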
call poly_hash_ad_internal
xorq %rcx,%rcx
.Lseal_avx2_short_hash_remainder:
cmpq $16,%rcx
jb .Lseal_avx2_short_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
addq $16,%rdi
jmp .Lseal_avx2_short_hash_remainder
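// The block above is the same Poly1305 step as in the wide tail loops,
// just written with plain mulq/imulq instead of mulx; it hashes one
// 16-byte chunk of ciphertext per pass until fewer than 16 bytes remain.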
.Lseal_avx2_short_loop:
cmpq $32,%rbx
jb .Lseal_avx2_short_tail
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp .Lseal_avx2_short_loop
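// The vmovdqa chain above rotates the queue of unused keystream
// registers down one slot, so %ymm0 always holds the next 32 bytes to
// be XORed in .Lseal_avx2_short_loop.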
.Lseal_avx2_short_tail:
cmpq $16,%rbx
jb .Lseal_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm0
.Lseal_avx2_exit:
vzeroupper
jmp .Lseal_sse_tail_16
.cfi_endproc
.size chacha20_poly1305_seal_avx2, .-chacha20_poly1305_seal_avx2
#endif
chairq/First-choice | 34,174 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/sha256-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are an
// indication of some compiler "pathology"; most notably, code
// generated with -mgeneral-regs-only is significantly faster,
// and then the gap is only 40-90%.
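//
// The scalar fallback below keeps the working variables a..h in
// w20-w27, computes Ch(e,f,g) with and/bic/orr, and evaluates
// Maj(a,b,c) as ((b^c)&(a^b))^b, seeding b^c one round ahead (the
// "magic seed" comment). One round, as a plain C sketch (illustration
// only, hypothetical names, not the generated source; needs <stdint.h>):
//
//   #define ROTR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
//   static void sha256_round(uint32_t s[8], uint32_t Ki, uint32_t Wi) {
//       uint32_t a=s[0],b=s[1],c=s[2],d=s[3],e=s[4],f=s[5],g=s[6],h=s[7];
//       uint32_t Ch  = (e & f) ^ (~e & g);
//       uint32_t Maj = (a & b) ^ (a & c) ^ (b & c);
//       uint32_t S1  = ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25);
//       uint32_t S0  = ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22);
//       uint32_t T1  = h + S1 + Ch + Ki + Wi;
//       uint32_t T2  = S0 + Maj;
//       s[7]=g; s[6]=f; s[5]=e; s[4]=d+T1;
//       s[3]=c; s[2]=b; s[1]=a; s[0]=T1+T2;
//   }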
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
#endif
.text
.private_extern _OPENSSL_armcap_P
.globl _sha256_block_data_order
.private_extern _sha256_block_data_order
.align 6
_sha256_block_data_order:
AARCH64_VALID_CALL_TARGET
#ifndef __KERNEL__
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x16,:pg_hi21_nc:_OPENSSL_armcap_P
#else
adrp x16,_OPENSSL_armcap_P@PAGE
#endif
ldr w16,[x16,_OPENSSL_armcap_P@PAGEOFF]
tst w16,#ARMV8_SHA256
b.ne Lv8_entry
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,LK256@PAGE
add x30,x30,LK256@PAGEOFF
stp x0,x2,[x29,#96]
Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
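// From round 16 on (the loop below), the message schedule is extended
// on the fly, with a four-word window of W kept at [sp]; the transition
// rounds above already began computing the sigma terms:
//
//   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
//   sigma0(x) = ROTR32(x, 7)  ^ ROTR32(x, 18) ^ (x >> 3)
//   sigma1(x) = ROTR32(x, 17) ^ ROTR32(x, 19) ^ (x >> 10)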
Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,Loop_16_xx
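// All 64 rounds of this block are done once the zero terminator is read
// from the K table. The epilogue below rewinds the K pointer (64
// constants plus the terminator word = 260 bytes), reloads the context
// and input pointers, and adds the working variables back into the hash
// state (the feed-forward) before looping while input remains.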
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section __TEXT,__const
.align 6
LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
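// Hardware-accelerated path using the ARMv8 Crypto Extensions. The
// .long words below are raw encodings of sha256h, sha256h2, sha256su0
// and sha256su1 (mnemonics in the trailing comments), emitted
// numerically so that assemblers without crypto support can still build
// this file; they correspond to the ACLE intrinsics vsha256hq_u32,
// vsha256h2q_u32, vsha256su0q_u32 and vsha256su1q_u32.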
.align 6
sha256_block_armv8:
Lv8_entry:
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,LK256@PAGE
add x3,x3,LK256@PAGEOFF
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
chairq/First-choice | 34,346 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/sha256-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are an
// indication of some compiler "pathology"; most notably, code
// generated with -mgeneral-regs-only is significantly faster,
// and then the gap is only 40-90%.
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
#endif
.text
.hidden OPENSSL_armcap_P
.globl sha256_block_data_order
.hidden sha256_block_data_order
.type sha256_block_data_order,%function
.align 6
sha256_block_data_order:
AARCH64_VALID_CALL_TARGET
#ifndef __KERNEL__
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x16,:pg_hi21_nc:OPENSSL_armcap_P
#else
adrp x16,OPENSSL_armcap_P
#endif
ldr w16,[x16,:lo12:OPENSSL_armcap_P]
tst w16,#ARMV8_SHA256
b.ne .Lv8_entry
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,.LK256
add x30,x30,:lo12:.LK256
stp x0,x2,[x29,#96]
.Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
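// Rounds 16..63 fuse the same compression step with the message
// schedule (standard formulas, for reference):
//   sigma0(x) = ror(x,7)  ^ ror(x,18) ^ (x >> 3)
//   sigma1(x) = ror(x,17) ^ ror(x,19) ^ (x >> 10)
//   W[i] = W[i-16] + sigma0(W[i-15]) + W[i-7] + sigma1(W[i-2])
// The sixteen W words rotate through w3-w15 and w0-w2, with a
// four-word spill area at [sp,#0..#12].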
.Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,.Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne .Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size sha256_block_data_order,.-sha256_block_data_order
.section .rodata
.align 6
.type .LK256,%object
.LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.size .LK256,.-.LK256
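// For reference: K256[i] is the first 32 bits of the fractional part
// of the cube root of the i-th prime (FIPS 180-4). The trailing zero
// word doubles as a sentinel: the scalar loop exits via
// "cbnz w19,.Loop_16_xx" once it is loaded.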
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.type sha256_block_armv8,%function
.align 6
sha256_block_armv8:
.Lv8_entry:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,.LK256
add x3,x3,:lo12:.LK256
.Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.inst 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.inst 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.inst 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,.Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
.size sha256_block_armv8,.-sha256_block_armv8
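// sha256_block_armv8 uses the Armv8 Cryptography Extensions (emitted
// as raw .inst words, mnemonics in the comments): each
// sha256h/sha256h2 pair advances the state by four rounds against one
// 4-word K block, sha256su0/sha256su1 expand the next message words,
// and v18/v19 hold the incoming state for the final re-add.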
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
chairq/First-choice | 26,592 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/vpaes-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#include <ring-core/arm_arch.h>
.section .rodata
.type _vpaes_consts,%object
.align 7 // totally strategic alignment
_vpaes_consts:
.Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
.Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Key schedule constants
//
.Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
.Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.size _vpaes_consts,.-_vpaes_consts
.align 6
.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.type _vpaes_encrypt_preheat,%function
.align 4
_vpaes_encrypt_preheat:
adrp x10, .Lk_inv
add x10, x10, :lo12:.Lk_inv
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // .Lk_sb1, .Lk_sb2
ret
.size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
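## A note on the vector-permute technique used throughout: the state is
## split into low and high nibbles (the and/ushr #4 pairs below), and
## every S-box and linear step becomes tbl lookups into the 16-byte
## tables loaded by the preheat. All lookups are register permutes, so
## there are no secret-dependent memory accesses.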
.type _vpaes_encrypt_core,%function
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, .Lk_mc_forward+16
add x11, x11, :lo12:.Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b .Lenc_entry
.align 4
.Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
.Lenc_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, .Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,%function
.align 4
vpaes_encrypt:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v7.16b}, [x0]
bl _vpaes_encrypt_preheat
bl _vpaes_encrypt_core
st1 {v0.16b}, [x1]
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size vpaes_encrypt,.-vpaes_encrypt
.type _vpaes_encrypt_2x,%function
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, .Lk_mc_forward+16
add x11, x11, :lo12:.Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b .Lenc_2x_entry
.align 4
.Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
.Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, .Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
.size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x
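## _vpaes_encrypt_2x is the single-block core unrolled two-wide: the
## second block's state mirrors the first in v8-v15, and each step is
## issued in pairs so the two independent dependency chains overlap and
## hide tbl/eor latency.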
########################################################
## ##
## AES key schedule ##
## ##
########################################################
.type _vpaes_key_preheat,%function
.align 4
_vpaes_key_preheat:
adrp x10, .Lk_inv
add x10, x10, :lo12:.Lk_inv
movi v16.16b, #0x5b // .Lk_s63
adrp x11, .Lk_sb1
add x11, x11, :lo12:.Lk_sb1
movi v17.16b, #0x0f // .Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // .Lk_inv, .Lk_ipt
adrp x10, .Lk_dksd
add x10, x10, :lo12:.Lk_dksd
ld1 {v22.2d,v23.2d}, [x11] // .Lk_sb1
adrp x11, .Lk_mc_forward
add x11, x11, :lo12:.Lk_mc_forward
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9
ld1 {v8.2d}, [x10] // .Lk_rcon
ld1 {v9.2d}, [x11] // .Lk_mc_forward[0]
ret
.size _vpaes_key_preheat,.-_vpaes_key_preheat
.type _vpaes_schedule_core,%function
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, .Lk_sr // lea .Lk_sr(%rip),%r10
add x10, x10, :lo12:.Lk_sr
add x8, x8, x10
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
cmp w1, #192 // cmp $192, %esi
b.hi .Lschedule_256
b.eq .Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
.Lschedule_128:
mov x0, #10 // mov $10, %esi
.Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, .Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b .Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
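## (By this count AES-192 ends with its full 13 round keys: the
## transformed input key stored by the core's prologue plus twelve more
## over these four cycles, the last emitted via .Lschedule_mangle_last.)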
.align 4
.Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
.Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, .Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b .Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align 4
.Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
.Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, .Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b .Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
.Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, :lo12:.Lk_deskew
cbnz w3, .Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, :lo12:.Lk_opt
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
.Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.type _vpaes_schedule_192_smear,%function
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
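## For reference, this realizes the standard key-expansion recurrence
##   W[i] = W[i-Nk] ^ SubWord(RotWord(W[i-1])) ^ Rcon[i/Nk]
## with SubWord performed by the same nibble-split tbl S-box as the
## encrypt core, and the W[i-Nk] chain produced by the dword smear.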
.type _vpaes_schedule_round,%function
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.type _vpaes_schedule_transform,%function
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
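## The circulant multiply below is built from byte rotations: with
## r(x) = tbl(x, .Lk_mc_forward[0]) rotating each column by one byte,
## the code computes r(x) ^ r(r(x)) ^ r(r(r(x))), then applies the
## shiftrows permute from .Lk_sr.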
.type _vpaes_schedule_mangle,%function
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
.Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,%function
.align 4
vpaes_set_encrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // add $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
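// Note: the stored count is 9/11/13 for AES-128/192/256, the number of
// middle rounds the encrypt core runs; round 0's key add is folded
// into the input transform and the final round is handled separately.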
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,%function
.align 4
vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, .Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq .Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
b.ls .Lctr32_done
.Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
.Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi .Lctr32_loop
.Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
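// CTR recap: each keystream block is the AES encryption of the IV with
// its last 32-bit word replaced by a big-endian counter. The counter
// is read once (ldr w6,[x4,#12]), kept in native order, and written
// back into lane 3 of the state vector(s) with rev + "mov vN.s[3]"
// per block.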
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
chairq/First-choice | 29,307 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/chacha-armv4-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
#include <ring-core/arm_arch.h>
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if defined(__thumb2__) || defined(__clang__)
#define ldrhsb ldrbhs
#endif
.align 5
.Lsigma:
.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral
.Lone:
.long 1,0,0,0
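@ .Lsigma is "expand 32-byte k" as four little-endian words
@ (0x61707865 = "expa", 0x3320646e = "nd 3", and so on), and .Lone is
@ the per-block counter increment.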
#if __ARM_MAX_ARCH__>=7
.hidden OPENSSL_armcap_P
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-.LChaCha20_ctr32
#else
.word -1
#endif
.globl ChaCha20_ctr32
.hidden ChaCha20_ctr32
.type ChaCha20_ctr32,%function
.align 5
ChaCha20_ctr32:
.LChaCha20_ctr32:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r14,pc,#16 @ ChaCha20_ctr32
#else
adr r14,.LChaCha20_ctr32
#endif
cmp r2,#0 @ len==0?
#ifdef __thumb2__
itt eq
#endif
addeq sp,sp,#4*3
beq .Lno_data
#if __ARM_MAX_ARCH__>=7
cmp r2,#192 @ test len
bls .Lshort
ldr r4,[r14,#-32]
ldr r4,[r14,r4]
# ifdef __APPLE__
ldr r4,[r4]
# endif
tst r4,#ARMV7_NEON
bne .LChaCha20_neon
.Lshort:
#endif
ldmia r12,{r4,r5,r6,r7} @ load counter and nonce
sub sp,sp,#4*(16) @ off-load area
sub r14,r14,#64 @ .Lsigma
stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
ldmia r14,{r0,r1,r2,r3} @ load sigma
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key
stmdb sp!,{r0,r1,r2,r3} @ copy sigma
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
b .Loop_outer_enter
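@ Stack layout from here: [sp,#0..#63] holds the 16-word working state
@ (sigma, key, counter+nonce, in that order); [sp,#4*16...] is the
@ off-load area for the half of the state that does not fit in
@ registers; [sp,#4*32...] keeps the saved out/inp/len values.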
.align 4
.Loop_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
str r11,[sp,#4*(32+2)] @ save len
str r12, [sp,#4*(32+1)] @ save inp
str r14, [sp,#4*(32+0)] @ save out
.Loop_outer_enter:
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(16+15)]
mov r11,#10
b .Loop
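@ Each .Loop iteration is one ChaCha20 double-round (r11 counts 10 of
@ them, i.e. 20 rounds). Quarter-round recap, QR(a,b,c,d):
@   a+=b; d^=a; d<<<=16;  c+=d; b^=c; b<<<=12;
@   a+=b; d^=a; d<<<=8;   c+=d; b^=c; b<<<=7;
@ Left-rotates by n are realized as ror#(32-n), hence ror#16/20/24/25.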
.align 4
.Loop:
subs r11,r11,#1
add r0,r0,r4
mov r12,r12,ror#16
add r1,r1,r5
mov r10,r10,ror#16
eor r12,r12,r0,ror#16
eor r10,r10,r1,ror#16
add r8,r8,r12
mov r4,r4,ror#20
add r9,r9,r10
mov r5,r5,ror#20
eor r4,r4,r8,ror#20
eor r5,r5,r9,ror#20
add r0,r0,r4
mov r12,r12,ror#24
add r1,r1,r5
mov r10,r10,ror#24
eor r12,r12,r0,ror#24
eor r10,r10,r1,ror#24
add r8,r8,r12
mov r4,r4,ror#25
add r9,r9,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+13)]
ldr r10,[sp,#4*(16+15)]
eor r4,r4,r8,ror#25
eor r5,r5,r9,ror#25
str r8,[sp,#4*(16+8)]
ldr r8,[sp,#4*(16+10)]
add r2,r2,r6
mov r14,r14,ror#16
str r9,[sp,#4*(16+9)]
ldr r9,[sp,#4*(16+11)]
add r3,r3,r7
mov r10,r10,ror#16
eor r14,r14,r2,ror#16
eor r10,r10,r3,ror#16
add r8,r8,r14
mov r6,r6,ror#20
add r9,r9,r10
mov r7,r7,ror#20
eor r6,r6,r8,ror#20
eor r7,r7,r9,ror#20
add r2,r2,r6
mov r14,r14,ror#24
add r3,r3,r7
mov r10,r10,ror#24
eor r14,r14,r2,ror#24
eor r10,r10,r3,ror#24
add r8,r8,r14
mov r6,r6,ror#25
add r9,r9,r10
mov r7,r7,ror#25
eor r6,r6,r8,ror#25
eor r7,r7,r9,ror#25
add r0,r0,r5
mov r10,r10,ror#16
add r1,r1,r6
mov r12,r12,ror#16
eor r10,r10,r0,ror#16
eor r12,r12,r1,ror#16
add r8,r8,r10
mov r5,r5,ror#20
add r9,r9,r12
mov r6,r6,ror#20
eor r5,r5,r8,ror#20
eor r6,r6,r9,ror#20
add r0,r0,r5
mov r10,r10,ror#24
add r1,r1,r6
mov r12,r12,ror#24
eor r10,r10,r0,ror#24
eor r12,r12,r1,ror#24
add r8,r8,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+15)]
ldr r10,[sp,#4*(16+13)]
add r9,r9,r12
mov r6,r6,ror#25
eor r5,r5,r8,ror#25
eor r6,r6,r9,ror#25
str r8,[sp,#4*(16+10)]
ldr r8,[sp,#4*(16+8)]
add r2,r2,r7
mov r10,r10,ror#16
str r9,[sp,#4*(16+11)]
ldr r9,[sp,#4*(16+9)]
add r3,r3,r4
mov r14,r14,ror#16
eor r10,r10,r2,ror#16
eor r14,r14,r3,ror#16
add r8,r8,r10
mov r7,r7,ror#20
add r9,r9,r14
mov r4,r4,ror#20
eor r7,r7,r8,ror#20
eor r4,r4,r9,ror#20
add r2,r2,r7
mov r10,r10,ror#24
add r3,r3,r4
mov r14,r14,ror#24
eor r10,r10,r2,ror#24
eor r14,r14,r3,ror#24
add r8,r8,r10
mov r7,r7,ror#25
add r9,r9,r14
mov r4,r4,ror#25
eor r7,r7,r8,ror#25
eor r4,r4,r9,ror#25
bne .Loop
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
cmp r11,#64 @ done yet?
#ifdef __thumb2__
itete lo
#endif
addlo r12,sp,#4*(0) @ shortcut or ...
ldrhs r12,[sp,#4*(32+1)] @ ... load inp
addlo r14,sp,#4*(0) @ shortcut or ...
ldrhs r14,[sp,#4*(32+0)] @ ... load out
ldr r8,[sp,#4*(0)] @ load key material
ldr r9,[sp,#4*(1)]
#if __ARM_ARCH__>=6 || !defined(__ARMEB__)
# if __ARM_ARCH__<7
orr r10,r12,r14
tst r10,#3 @ are input and output aligned?
ldr r10,[sp,#4*(2)]
bne .Lunaligned
cmp r11,#64 @ restore flags
# else
ldr r10,[sp,#4*(2)]
# endif
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8 @ xor with input
eorhs r1,r1,r9
add r8,sp,#4*(4)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r1,[r14,#-12]
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
add r8,sp,#4*(8)
str r4,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r5,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8
eorhs r1,r1,r9
add r8,sp,#4*(12)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
str r1,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r4,[r14],#16 @ store output
str r5,[r14,#-12]
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi .Loop_outer
beq .Ldone
# if __ARM_ARCH__<7
b .Ltail
.align 4
.Lunaligned: @ unaligned endian-neutral path
cmp r11,#64 @ restore flags
# endif
#endif
#if __ARM_ARCH__<7
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+0)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r0,sp,#4*(16+8)
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
add r8,sp,#4*(4+4)
ldmia r8,{r8,r9,r10,r11} @ load key material
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx"
strhi r11,[sp,#4*(16+11)] @ copy "rx"
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+8)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
bhi .Loop_outer
beq .Ldone
#endif
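@ (note: .Ltail finishes a final block shorter than 64 bytes; the
@ keystream was already written to the stack via the "shortcut"
@ pointers above, so it is XORed with the input byte by byte.)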
.Ltail:
ldr r12,[sp,#4*(32+1)] @ load inp
add r9,sp,#4*(0)
ldr r14,[sp,#4*(32+0)] @ load out
.Loop_tail:
ldrb r10,[r9],#1 @ read buffer on stack
ldrb r11,[r12],#1 @ read input
subs r8,r8,#1
eor r11,r11,r10
strb r11,[r14],#1 @ store output
bne .Loop_tail
.Ldone:
add sp,sp,#4*(32+3)
.Lno_data:
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
.size ChaCha20_ctr32,.-ChaCha20_ctr32
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.type ChaCha20_neon,%function
.align 5
ChaCha20_neon:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
.LChaCha20_neon:
adr r14,.Lsigma
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so
stmdb sp!,{r0,r1,r2,r3}
vld1.32 {q1,q2},[r3] @ load key
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
sub sp,sp,#4*(16+16)
vld1.32 {q3},[r12] @ load counter and nonce
add r12,sp,#4*8
ldmia r14,{r0,r1,r2,r3} @ load sigma
vld1.32 {q0},[r14]! @ load sigma
vld1.32 {q12},[r14] @ one
vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce
vst1.32 {q0,q1},[sp] @ copy sigma|1/2key
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
vshl.i32 d26,d24,#1 @ two
vstr d24,[sp,#4*(16+0)]
vshl.i32 d28,d24,#2 @ four
vstr d26,[sp,#4*(16+2)]
vmov q4,q0
vstr d28,[sp,#4*(16+4)]
vmov q8,q0
vmov q5,q1
vmov q9,q1
b .Loop_neon_enter
.align 4
.Loop_neon_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
cmp r11,#64*2 @ if len<=64*2
bls .Lbreak_neon @ switch to integer-only
vmov q4,q0
str r11,[sp,#4*(32+2)] @ save len
vmov q8,q0
str r12, [sp,#4*(32+1)] @ save inp
vmov q5,q1
str r14, [sp,#4*(32+0)] @ save out
vmov q9,q1
.Loop_neon_enter:
ldr r11, [sp,#4*(15)]
vadd.i32 q7,q3,q12 @ counter+1
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
vmov q6,q2
ldr r10, [sp,#4*(13)]
vmov q10,q2
ldr r14,[sp,#4*(14)]
vadd.i32 q11,q7,q12 @ counter+2
str r11, [sp,#4*(16+15)]
mov r11,#10
add r12,r12,#3 @ counter+3
b .Loop_neon
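@ (note: NEON has no vector rotate, so each rol by n below is a
@ vshr.u32 by 32-n followed by vsli.32 by n; the rol-by-16 cases
@ use the cheaper vrev32.16 instead.)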
.align 4
.Loop_neon:
subs r11,r11,#1
vadd.i32 q0,q0,q1
add r0,r0,r4
vadd.i32 q4,q4,q5
mov r12,r12,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r5
veor q3,q3,q0
mov r10,r10,ror#16
veor q7,q7,q4
eor r12,r12,r0,ror#16
veor q11,q11,q8
eor r10,r10,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r12
vrev32.16 q7,q7
mov r4,r4,ror#20
vrev32.16 q11,q11
add r9,r9,r10
vadd.i32 q2,q2,q3
mov r5,r5,ror#20
vadd.i32 q6,q6,q7
eor r4,r4,r8,ror#20
vadd.i32 q10,q10,q11
eor r5,r5,r9,ror#20
veor q12,q1,q2
add r0,r0,r4
veor q13,q5,q6
mov r12,r12,ror#24
veor q14,q9,q10
add r1,r1,r5
vshr.u32 q1,q12,#20
mov r10,r10,ror#24
vshr.u32 q5,q13,#20
eor r12,r12,r0,ror#24
vshr.u32 q9,q14,#20
eor r10,r10,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r12
vsli.32 q5,q13,#12
mov r4,r4,ror#25
vsli.32 q9,q14,#12
add r9,r9,r10
vadd.i32 q0,q0,q1
mov r5,r5,ror#25
vadd.i32 q4,q4,q5
str r10,[sp,#4*(16+13)]
vadd.i32 q8,q8,q9
ldr r10,[sp,#4*(16+15)]
veor q12,q3,q0
eor r4,r4,r8,ror#25
veor q13,q7,q4
eor r5,r5,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+8)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+10)]
vshr.u32 q7,q13,#24
add r2,r2,r6
vshr.u32 q11,q14,#24
mov r14,r14,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+9)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+11)]
vsli.32 q11,q14,#8
add r3,r3,r7
vadd.i32 q2,q2,q3
mov r10,r10,ror#16
vadd.i32 q6,q6,q7
eor r14,r14,r2,ror#16
vadd.i32 q10,q10,q11
eor r10,r10,r3,ror#16
veor q12,q1,q2
add r8,r8,r14
veor q13,q5,q6
mov r6,r6,ror#20
veor q14,q9,q10
add r9,r9,r10
vshr.u32 q1,q12,#25
mov r7,r7,ror#20
vshr.u32 q5,q13,#25
eor r6,r6,r8,ror#20
vshr.u32 q9,q14,#25
eor r7,r7,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r6
vsli.32 q5,q13,#7
mov r14,r14,ror#24
vsli.32 q9,q14,#7
add r3,r3,r7
vext.8 q2,q2,q2,#8
mov r10,r10,ror#24
vext.8 q6,q6,q6,#8
eor r14,r14,r2,ror#24
vext.8 q10,q10,q10,#8
eor r10,r10,r3,ror#24
vext.8 q1,q1,q1,#4
add r8,r8,r14
vext.8 q5,q5,q5,#4
mov r6,r6,ror#25
vext.8 q9,q9,q9,#4
add r9,r9,r10
vext.8 q3,q3,q3,#12
mov r7,r7,ror#25
vext.8 q7,q7,q7,#12
eor r6,r6,r8,ror#25
vext.8 q11,q11,q11,#12
eor r7,r7,r9,ror#25
vadd.i32 q0,q0,q1
add r0,r0,r5
vadd.i32 q4,q4,q5
mov r10,r10,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r6
veor q3,q3,q0
mov r12,r12,ror#16
veor q7,q7,q4
eor r10,r10,r0,ror#16
veor q11,q11,q8
eor r12,r12,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r10
vrev32.16 q7,q7
mov r5,r5,ror#20
vrev32.16 q11,q11
add r9,r9,r12
vadd.i32 q2,q2,q3
mov r6,r6,ror#20
vadd.i32 q6,q6,q7
eor r5,r5,r8,ror#20
vadd.i32 q10,q10,q11
eor r6,r6,r9,ror#20
veor q12,q1,q2
add r0,r0,r5
veor q13,q5,q6
mov r10,r10,ror#24
veor q14,q9,q10
add r1,r1,r6
vshr.u32 q1,q12,#20
mov r12,r12,ror#24
vshr.u32 q5,q13,#20
eor r10,r10,r0,ror#24
vshr.u32 q9,q14,#20
eor r12,r12,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r10
vsli.32 q5,q13,#12
mov r5,r5,ror#25
vsli.32 q9,q14,#12
str r10,[sp,#4*(16+15)]
vadd.i32 q0,q0,q1
ldr r10,[sp,#4*(16+13)]
vadd.i32 q4,q4,q5
add r9,r9,r12
vadd.i32 q8,q8,q9
mov r6,r6,ror#25
veor q12,q3,q0
eor r5,r5,r8,ror#25
veor q13,q7,q4
eor r6,r6,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+10)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+8)]
vshr.u32 q7,q13,#24
add r2,r2,r7
vshr.u32 q11,q14,#24
mov r10,r10,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+11)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+9)]
vsli.32 q11,q14,#8
add r3,r3,r4
vadd.i32 q2,q2,q3
mov r14,r14,ror#16
vadd.i32 q6,q6,q7
eor r10,r10,r2,ror#16
vadd.i32 q10,q10,q11
eor r14,r14,r3,ror#16
veor q12,q1,q2
add r8,r8,r10
veor q13,q5,q6
mov r7,r7,ror#20
veor q14,q9,q10
add r9,r9,r14
vshr.u32 q1,q12,#25
mov r4,r4,ror#20
vshr.u32 q5,q13,#25
eor r7,r7,r8,ror#20
vshr.u32 q9,q14,#25
eor r4,r4,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r7
vsli.32 q5,q13,#7
mov r10,r10,ror#24
vsli.32 q9,q14,#7
add r3,r3,r4
vext.8 q2,q2,q2,#8
mov r14,r14,ror#24
vext.8 q6,q6,q6,#8
eor r10,r10,r2,ror#24
vext.8 q10,q10,q10,#8
eor r14,r14,r3,ror#24
vext.8 q1,q1,q1,#12
add r8,r8,r10
vext.8 q5,q5,q5,#12
mov r7,r7,ror#25
vext.8 q9,q9,q9,#12
add r9,r9,r14
vext.8 q3,q3,q3,#4
mov r4,r4,ror#25
vext.8 q7,q7,q7,#4
eor r7,r7,r8,ror#25
vext.8 q11,q11,q11,#4
eor r4,r4,r9,ror#25
bne .Loop_neon
add r11,sp,#32
vld1.32 {q12,q13},[sp] @ load key material
vld1.32 {q14,q15},[r11]
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
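@ (note: q0-q3, q4-q7 and q8-q11 carry three more blocks computed
@ in parallel at counter+0, counter+1 and counter+2, while the
@ scalar registers above hold the block at counter+3.)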
ldr r12,[sp,#4*(32+1)] @ load inp
ldr r14,[sp,#4*(32+0)] @ load out
vadd.i32 q0,q0,q12 @ accumulate key material
vadd.i32 q4,q4,q12
vadd.i32 q8,q8,q12
vldr d24,[sp,#4*(16+0)] @ one
vadd.i32 q1,q1,q13
vadd.i32 q5,q5,q13
vadd.i32 q9,q9,q13
vldr d26,[sp,#4*(16+2)] @ two
vadd.i32 q2,q2,q14
vadd.i32 q6,q6,q14
vadd.i32 q10,q10,q14
vadd.i32 d14,d14,d24 @ counter+1
vadd.i32 d22,d22,d26 @ counter+2
vadd.i32 q3,q3,q15
vadd.i32 q7,q7,q15
vadd.i32 q11,q11,q15
cmp r11,#64*4
blo .Ltail_neon
vld1.8 {q12,q13},[r12]! @ load input
mov r11,sp
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12 @ xor with input
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
vst1.8 {q0,q1},[r14]! @ store output
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vld1.32 {q0,q1},[r11]! @ load for next iteration
veor d25,d25,d25
vldr d24,[sp,#4*(16+4)] @ four
veor q9,q9,q13
vld1.32 {q2,q3},[r11]
veor q10,q10,q14
vst1.8 {q4,q5},[r14]!
veor q11,q11,q15
vst1.8 {q6,q7},[r14]!
vadd.i32 d6,d6,d24 @ next counter value
vldr d24,[sp,#4*(16+0)] @ one
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
vst1.8 {q8,q9},[r14]!
add r1,r1,r9
ldr r9,[r12,#-12]
vst1.8 {q10,q11},[r14]!
add r2,r2,r10
ldr r10,[r12,#-8]
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8 @ xor with input
add r8,sp,#4*(4)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r5,r5,r9
ldr r9,[r12,#-12]
add r6,r6,r10
ldr r10,[r12,#-8]
add r7,r7,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
add r8,sp,#4*(8)
eor r5,r5,r9
str r4,[r14],#16 @ store output
eor r6,r6,r10
str r5,[r14,#-12]
eor r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r1,r1,r9
ldr r9,[r12,#-12]
# ifdef __thumb2__
it hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
add r2,r2,r10
ldr r10,[r12,#-8]
# ifdef __thumb2__
it hi
# endif
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8
add r8,sp,#4*(12)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r8,r8,#4 @ next counter value
add r5,r5,r9
str r8,[sp,#4*(12)] @ save next counter value
ldr r8,[r12],#16 @ load input
add r6,r6,r10
add r4,r4,#3 @ counter+3
ldr r9,[r12,#-12]
add r7,r7,r11
ldr r10,[r12,#-8]
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
# ifdef __thumb2__
it hi
# endif
ldrhi r8,[sp,#4*(32+2)] @ re-load len
eor r5,r5,r9
eor r6,r6,r10
str r4,[r14],#16 @ store output
eor r7,r7,r11
str r5,[r14,#-12]
sub r11,r8,#64*4 @ len-=64*4
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi .Loop_neon_outer
b .Ldone_neon
.align 4
.Lbreak_neon:
@ harmonize NEON and integer-only stack frames: load data
@ from NEON frame, but save to integer-only one; distance
@ between the two is 4*(32+4+16-32)=4*(20).
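@ (breakdown, for reference: the NEON frame holds 4*32 bytes of
@ scratch, 4*4 bytes of saved arguments and 4*16 bytes of d8-d15
@ below the common saved-register area, versus 4*32 bytes for the
@ integer-only frame, hence the 4*(20) offset applied below.)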
str r11, [sp,#4*(20+32+2)] @ save len
add r11,sp,#4*(32+4)
str r12, [sp,#4*(20+32+1)] @ save inp
str r14, [sp,#4*(20+32+0)] @ save out
ldr r12,[sp,#4*(16+10)]
ldr r14,[sp,#4*(16+11)]
vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement
str r12,[sp,#4*(20+16+10)] @ copy "rx"
str r14,[sp,#4*(20+16+11)] @ copy "rx"
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(20+16+15)]
add r11,sp,#4*(20)
vst1.32 {q0,q1},[r11]! @ copy key
add sp,sp,#4*(20) @ switch frame
vst1.32 {q2,q3},[r11]
mov r11,#10
b .Loop @ go integer-only
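@ (note: .Ltail_neon below dispatches on the bytes left and spills
@ any still-unused keystream blocks to the stack, so .Loop_tail_neon
@ can finish the last partial block byte by byte.)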
.align 4
.Ltail_neon:
cmp r11,#64*3
bhs .L192_or_more_neon
cmp r11,#64*2
bhs .L128_or_more_neon
cmp r11,#64*1
bhs .L64_or_more_neon
add r8,sp,#4*(8)
vst1.8 {q0,q1},[sp]
add r10,sp,#4*(0)
vst1.8 {q2,q3},[r8]
b .Loop_tail_neon
.align 4
.L64_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
veor q2,q2,q14
veor q3,q3,q15
vst1.8 {q0,q1},[r14]!
vst1.8 {q2,q3},[r14]!
beq .Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q4,q5},[sp]
add r10,sp,#4*(0)
vst1.8 {q6,q7},[r8]
sub r11,r11,#64*1 @ len-=64*1
b .Loop_tail_neon
.align 4
.L128_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vst1.8 {q0,q1},[r14]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vst1.8 {q4,q5},[r14]!
vst1.8 {q6,q7},[r14]!
beq .Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q8,q9},[sp]
add r10,sp,#4*(0)
vst1.8 {q10,q11},[r8]
sub r11,r11,#64*2 @ len-=64*2
b .Loop_tail_neon
.align 4
.L192_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q0,q1},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vst1.8 {q2,q3},[r14]!
veor q9,q9,q13
vst1.8 {q4,q5},[r14]!
veor q10,q10,q14
vst1.8 {q6,q7},[r14]!
veor q11,q11,q15
vst1.8 {q8,q9},[r14]!
vst1.8 {q10,q11},[r14]!
beq .Ldone_neon
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(4)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r6,r6,r10
add r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7}
add r0,sp,#4*(16+8)
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(12)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r4,r4,#3 @ counter+3
add r6,r6,r10
add r7,r7,r11
ldr r11,[sp,#4*(32+2)] @ re-load len
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7}
add r10,sp,#4*(0)
sub r11,r11,#64*3 @ len-=64*3
.Loop_tail_neon:
ldrb r8,[r10],#1 @ read buffer on stack
ldrb r9,[r12],#1 @ read input
subs r11,r11,#1
eor r8,r8,r9
strb r8,[r14],#1 @ store output
bne .Loop_tail_neon
.Ldone_neon:
add sp,sp,#4*(32+4)
vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15}
add sp,sp,#4*(16+3)
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
.size ChaCha20_neon,.-ChaCha20_neon
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/p256-x86_64-asm-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.section __DATA,__const
.p2align 6
L$poly:
.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
L$One:
.long 1,1,1,1,1,1,1,1
L$Two:
.long 2,2,2,2,2,2,2,2
L$Three:
.long 3,3,3,3,3,3,3,3
L$ONE_mont:
.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
L$ord:
.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
L$ordK:
.quad 0xccd1c8aaee00bc4f
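# (note: L$ord is the P-256 group order n, and L$ordK appears to be
# the Montgomery constant -n^(-1) mod 2^64; the imulq/mulxq by it
# below derive each per-limb reduction multiplier.)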
.text
.globl _ecp_nistz256_neg
.private_extern _ecp_nistz256_neg
.p2align 5
_ecp_nistz256_neg:
_CET_ENDBR
pushq %r12
pushq %r13
L$neg_body:
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %r13,%r13
subq 0(%rsi),%r8
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r8,%rax
sbbq 24(%rsi),%r11
leaq L$poly(%rip),%rsi
movq %r9,%rdx
sbbq $0,%r13
addq 0(%rsi),%r8
movq %r10,%rcx
adcq 8(%rsi),%r9
adcq 16(%rsi),%r10
movq %r11,%r12
adcq 24(%rsi),%r11
testq %r13,%r13
cmovzq %rax,%r8
cmovzq %rdx,%r9
movq %r8,0(%rdi)
cmovzq %rcx,%r10
movq %r9,8(%rdi)
cmovzq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq 0(%rsp),%r13
movq 8(%rsp),%r12
leaq 16(%rsp),%rsp
L$neg_epilogue:
ret
.globl _ecp_nistz256_ord_mul_mont
.private_extern _ecp_nistz256_ord_mul_mont
.p2align 5
_ecp_nistz256_ord_mul_mont:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rcx
movq 8(%rcx),%rcx
andl $0x80100,%ecx
cmpl $0x80100,%ecx
je L$ecp_nistz256_ord_mul_montx
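# (note: 0x80100 masks the BMI2 and ADX feature bits of
# OPENSSL_ia32cap_P; when both are set, the mulx/adcx/adox variant
# is taken instead of the mulq-based code that follows.)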
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_mul_body:
movq 0(%rdx),%rax
movq %rdx,%rbx
leaq L$ord(%rip),%r14
movq L$ordK(%rip),%r15
movq %rax,%rcx
mulq 0(%rsi)
movq %rax,%r8
movq %rcx,%rax
movq %rdx,%r9
mulq 8(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq 16(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %r8,%r13
imulq %r15,%r8
movq %rdx,%r11
mulq 24(%rsi)
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq 0(%r14)
movq %r8,%rbp
addq %rax,%r13
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rcx
subq %r8,%r10
sbbq $0,%r8
mulq 8(%r14)
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %rbp,%rax
adcq %rdx,%r10
movq %rbp,%rdx
adcq $0,%r8
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 8(%rbx),%rax
sbbq %rdx,%rbp
addq %r8,%r11
adcq %rbp,%r12
adcq $0,%r13
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %r9,%rcx
imulq %r15,%r9
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r12
adcq $0,%rdx
xorq %r8,%r8
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
mulq 0(%r14)
movq %r9,%rbp
addq %rax,%rcx
movq %r9,%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%r9
mulq 8(%r14)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq %rdx,%r11
movq %rbp,%rdx
adcq $0,%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r12
movq 16(%rbx),%rax
sbbq %rdx,%rbp
addq %r9,%r12
adcq %rbp,%r13
adcq $0,%r8
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %r10,%rcx
imulq %r15,%r10
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r13
adcq $0,%rdx
xorq %r9,%r9
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
mulq 0(%r14)
movq %r10,%rbp
addq %rax,%rcx
movq %r10,%rax
adcq %rdx,%rcx
subq %r10,%r12
sbbq $0,%r10
mulq 8(%r14)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq %rdx,%r12
movq %rbp,%rdx
adcq $0,%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r13
movq 24(%rbx),%rax
sbbq %rdx,%rbp
addq %r10,%r13
adcq %rbp,%r8
adcq $0,%r9
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rcx,%rax
adcq $0,%rdx
movq %r11,%rcx
imulq %r15,%r11
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r8
adcq $0,%rdx
xorq %r10,%r10
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
mulq 0(%r14)
movq %r11,%rbp
addq %rax,%rcx
movq %r11,%rax
adcq %rdx,%rcx
subq %r11,%r13
sbbq $0,%r11
mulq 8(%r14)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq %rdx,%r13
movq %rbp,%rdx
adcq $0,%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
sbbq %rdx,%rbp
addq %r11,%r8
adcq %rbp,%r9
adcq $0,%r10
movq %r12,%rsi
subq 0(%r14),%r12
movq %r13,%r11
sbbq 8(%r14),%r13
movq %r8,%rcx
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rsi,%r12
cmovcq %r11,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_mul_epilogue:
ret
.globl _ecp_nistz256_ord_sqr_mont
.private_extern _ecp_nistz256_ord_sqr_mont
.p2align 5
_ecp_nistz256_ord_sqr_mont:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rcx
movq 8(%rcx),%rcx
andl $0x80100,%ecx
cmpl $0x80100,%ecx
je L$ecp_nistz256_ord_sqr_montx
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_sqr_body:
movq 0(%rsi),%r8
movq 8(%rsi),%rax
movq 16(%rsi),%r14
movq 24(%rsi),%r15
leaq L$ord(%rip),%rsi
movq %rdx,%rbx
jmp L$oop_ord_sqr
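# (note: the repeat count arrives in %rdx and is kept in %rbx; the
# loop below squares the input that many times, presumably so
# callers can chain the repeated squarings used for inversion.)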
.p2align 5
L$oop_ord_sqr:
movq %rax,%rbp
mulq %r8
movq %rax,%r9
.byte 102,72,15,110,205
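# (note: the .byte sequences in this loop appear to be hand-encoded
# movq transfers between general and %xmm registers, parking input
# limbs in %xmm1-%xmm3 so they can be reloaded later without memory
# traffic.)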
movq %r14,%rax
movq %rdx,%r10
mulq %r8
addq %rax,%r10
movq %r15,%rax
.byte 102,73,15,110,214
adcq $0,%rdx
movq %rdx,%r11
mulq %r8
addq %rax,%r11
movq %r15,%rax
.byte 102,73,15,110,223
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
movq %rax,%r13
movq %r14,%rax
movq %rdx,%r14
mulq %rbp
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r15
mulq %rbp
addq %rax,%r12
adcq $0,%rdx
addq %r15,%r12
adcq %rdx,%r13
adcq $0,%r14
xorq %r15,%r15
movq %r8,%rax
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
.byte 102,72,15,126,200
movq %rdx,%rbp
mulq %rax
addq %rbp,%r9
adcq %rax,%r10
.byte 102,72,15,126,208
adcq $0,%rdx
movq %rdx,%rbp
mulq %rax
addq %rbp,%r11
adcq %rax,%r12
.byte 102,72,15,126,216
adcq $0,%rdx
movq %rdx,%rbp
movq %r8,%rcx
imulq 32(%rsi),%r8
mulq %rax
addq %rbp,%r13
adcq %rax,%r14
movq 0(%rsi),%rax
adcq %rdx,%r15
mulq %r8
movq %r8,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r8,%r10
sbbq $0,%rbp
mulq %r8
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %r8,%rax
adcq %rdx,%r10
movq %r8,%rdx
adcq $0,%rbp
movq %r9,%rcx
imulq 32(%rsi),%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 0(%rsi),%rax
sbbq %rdx,%r8
addq %rbp,%r11
adcq $0,%r8
mulq %r9
movq %r9,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%rbp
mulq %r9
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %r9,%rax
adcq %rdx,%r11
movq %r9,%rdx
adcq $0,%rbp
movq %r10,%rcx
imulq 32(%rsi),%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
movq 0(%rsi),%rax
sbbq %rdx,%r9
addq %rbp,%r8
adcq $0,%r9
mulq %r10
movq %r10,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r10,%r8
sbbq $0,%rbp
mulq %r10
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %r10,%rax
adcq %rdx,%r8
movq %r10,%rdx
adcq $0,%rbp
movq %r11,%rcx
imulq 32(%rsi),%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r9
movq 0(%rsi),%rax
sbbq %rdx,%r10
addq %rbp,%r9
adcq $0,%r10
mulq %r11
movq %r11,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r11,%r9
sbbq $0,%rbp
mulq %r11
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
movq %r11,%rdx
adcq $0,%rbp
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r10
sbbq %rdx,%r11
addq %rbp,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r12,%r8
adcq %r13,%r9
movq %r8,%r12
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%rax
adcq $0,%rdx
subq 0(%rsi),%r8
movq %r10,%r14
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r15
sbbq 24(%rsi),%r11
sbbq $0,%rdx
cmovcq %r12,%r8
cmovncq %r9,%rax
cmovncq %r10,%r14
cmovncq %r11,%r15
decq %rbx
jnz L$oop_ord_sqr
movq %r8,0(%rdi)
movq %rax,8(%rdi)
pxor %xmm1,%xmm1
movq %r14,16(%rdi)
pxor %xmm2,%xmm2
movq %r15,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_sqr_epilogue:
ret
.p2align 5
ecp_nistz256_ord_mul_montx:
L$ecp_nistz256_ord_mul_montx:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
leaq L$ord-128(%rip),%r14
movq L$ordK(%rip),%r15
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
mulxq %r11,%rbp,%r11
addq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
mulxq %r15,%rdx,%rax
adcq %rbp,%r10
adcq %rcx,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24+128(%r14),%rcx,%rbp
movq 8(%rbx),%rdx
adcxq %rcx,%r11
adoxq %rbp,%r12
adcxq %r8,%r12
adoxq %r8,%r13
adcq $0,%r13
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%r14),%rcx,%rbp
movq 16(%rbx),%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r9,%r13
adoxq %r9,%r8
adcq $0,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%r14),%rcx,%rbp
movq 24(%rbx),%rdx
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r10,%r8
adoxq %r10,%r9
adcq $0,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r8
adoxq %rbp,%r9
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%r14),%rcx,%rbp
leaq 128(%r14),%r14
movq %r12,%rbx
adcxq %rcx,%r8
adoxq %rbp,%r9
movq %r13,%rdx
adcxq %r11,%r9
adoxq %r11,%r10
adcq $0,%r10
movq %r8,%rcx
subq 0(%r14),%r12
sbbq 8(%r14),%r13
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_mulx_epilogue:
ret
.p2align 5
ecp_nistz256_ord_sqr_montx:
L$ecp_nistz256_ord_sqr_montx:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$ord_sqrx_body:
movq %rdx,%rbx
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq L$ord(%rip),%rsi
jmp L$oop_ord_sqrx
.p2align 5
L$oop_ord_sqrx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
movq %rdx,%rax
.byte 102,73,15,110,206
mulxq %r8,%rbp,%r12
movq %r14,%rdx
addq %rcx,%r10
.byte 102,73,15,110,215
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq %rax,%rdx
.byte 102,73,15,110,216
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
.byte 102,72,15,126,202
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
.byte 102,72,15,126,210
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
mulxq %rdx,%rcx,%rbp
.byte 0x67
.byte 102,72,15,126,218
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
adoxq %rbp,%r13
mulxq %rdx,%rcx,%rax
adoxq %rcx,%r14
adoxq %rax,%r15
movq %r8,%rdx
mulxq 32(%rsi),%rdx,%rcx
xorq %rax,%rax
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
adcxq %rax,%r8
movq %r9,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
adoxq %rax,%r9
movq %r10,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
adcxq %rax,%r10
movq %r11,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
adoxq %rax,%r11
addq %r8,%r12
adcq %r13,%r9
movq %r12,%rdx
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%r14
adcq $0,%rax
subq 0(%rsi),%r12
movq %r10,%r15
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r8
sbbq 24(%rsi),%r11
sbbq $0,%rax
cmovncq %r12,%rdx
cmovncq %r9,%r14
cmovncq %r10,%r15
cmovncq %r11,%r8
decq %rbx
jnz L$oop_ord_sqrx
movq %rdx,0(%rdi)
movq %r14,8(%rdi)
pxor %xmm1,%xmm1
movq %r15,16(%rdi)
pxor %xmm2,%xmm2
movq %r8,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$ord_sqrx_epilogue:
ret
.globl _ecp_nistz256_mul_mont
.private_extern _ecp_nistz256_mul_mont
.p2align 5
_ecp_nistz256_mul_mont:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rcx
movq 8(%rcx),%rcx
andl $0x80100,%ecx
L$mul_mont:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mul_body:
cmpl $0x80100,%ecx
je L$mul_montx
movq %rdx,%rbx
movq 0(%rdx),%rax
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
call __ecp_nistz256_mul_montq
jmp L$mul_mont_done
.p2align 5
L$mul_montx:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
call __ecp_nistz256_mul_montx
L$mul_mont_done:
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$mul_epilogue:
ret
.p2align 5
__ecp_nistz256_mul_montq:
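# (sketch of the reduction step, assuming the usual Montgomery
# setup: the low word of L$poly is all-ones, so -p^(-1) mod 2^64 is
# 1 and each round's multiplier is just the running low limb;
# adding multiplier*p then reduces to the shlq/shrq $32 pairs plus
# a single mulq by L$poly+24 seen below.)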
movq %rax,%rbp
mulq %r9
movq L$poly+8(%rip),%r14
movq %rax,%r8
movq %rbp,%rax
movq %rdx,%r9
mulq %r10
movq L$poly+24(%rip),%r15
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %r11
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r12
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
xorq %r13,%r13
movq %rdx,%r12
movq %r8,%rbp
shlq $32,%r8
mulq %r15
shrq $32,%rbp
addq %r8,%r9
adcq %rbp,%r10
adcq %rax,%r11
movq 8(%rbx),%rax
adcq %rdx,%r12
adcq $0,%r13
xorq %r8,%r8
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
movq %r9,%rbp
shlq $32,%r9
mulq %r15
shrq $32,%rbp
addq %r9,%r10
adcq %rbp,%r11
adcq %rax,%r12
movq 16(%rbx),%rax
adcq %rdx,%r13
adcq $0,%r8
xorq %r9,%r9
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
movq %r10,%rbp
shlq $32,%r10
mulq %r15
shrq $32,%rbp
addq %r10,%r11
adcq %rbp,%r12
adcq %rax,%r13
movq 24(%rbx),%rax
adcq %rdx,%r8
adcq $0,%r9
xorq %r10,%r10
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
movq %r11,%rbp
shlq $32,%r11
mulq %r15
shrq $32,%rbp
addq %r11,%r12
adcq %rbp,%r13
movq %r12,%rcx
adcq %rax,%r8
adcq %rdx,%r9
movq %r13,%rbp
adcq $0,%r10
subq $-1,%r12
movq %r8,%rbx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rdx
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rcx,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rbx,%r8
movq %r13,8(%rdi)
cmovcq %rdx,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_sqr_mont
.private_extern _ecp_nistz256_sqr_mont
.p2align 5
_ecp_nistz256_sqr_mont:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rcx
movq 8(%rcx),%rcx
andl $0x80100,%ecx
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqr_body:
cmpl $0x80100,%ecx
je L$sqr_montx
movq 0(%rsi),%rax
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
call __ecp_nistz256_sqr_montq
jmp L$sqr_mont_done
.p2align 5
L$sqr_montx:
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq -128(%rsi),%rsi
call __ecp_nistz256_sqr_montx
L$sqr_mont_done:
movq 0(%rsp),%r15
movq 8(%rsp),%r14
movq 16(%rsp),%r13
movq 24(%rsp),%r12
movq 32(%rsp),%rbx
movq 40(%rsp),%rbp
leaq 48(%rsp),%rsp
L$sqr_epilogue:
ret
.p2align 5
__ecp_nistz256_sqr_montq:
movq %rax,%r13
mulq %r14
movq %rax,%r9
movq %r15,%rax
movq %rdx,%r10
mulq %r13
addq %rax,%r10
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r13
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq %r14
addq %rax,%r12
movq %r8,%rax
adcq $0,%rdx
addq %rbp,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r15
xorq %r15,%r15
addq %rax,%r13
movq 0(%rsi),%rax
movq %rdx,%r14
adcq $0,%r14
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
movq 8(%rsi),%rax
movq %rdx,%rcx
mulq %rax
addq %rcx,%r9
adcq %rax,%r10
movq 16(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r11
adcq %rax,%r12
movq 24(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r13
adcq %rax,%r14
movq %r8,%rax
adcq %rdx,%r15
movq L$poly+8(%rip),%rsi
movq L$poly+24(%rip),%rbp
movq %r8,%rcx
shlq $32,%r8
mulq %rbp
shrq $32,%rcx
addq %r8,%r9
adcq %rcx,%r10
adcq %rax,%r11
movq %r9,%rax
adcq $0,%rdx
movq %r9,%rcx
shlq $32,%r9
movq %rdx,%r8
mulq %rbp
shrq $32,%rcx
addq %r9,%r10
adcq %rcx,%r11
adcq %rax,%r8
movq %r10,%rax
adcq $0,%rdx
movq %r10,%rcx
shlq $32,%r10
movq %rdx,%r9
mulq %rbp
shrq $32,%rcx
addq %r10,%r11
adcq %rcx,%r8
adcq %rax,%r9
movq %r11,%rax
adcq $0,%rdx
movq %r11,%rcx
shlq $32,%r11
movq %rdx,%r10
mulq %rbp
shrq $32,%rcx
addq %r11,%r8
adcq %rcx,%r9
adcq %rax,%r10
adcq $0,%rdx
xorq %r11,%r11
addq %r8,%r12
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %rdx,%r15
movq %r13,%r9
adcq $0,%r11
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%rcx
sbbq %rbp,%r15
sbbq $0,%r11
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %rcx,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.p2align 5
__ecp_nistz256_mul_montx:
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
movq $32,%r14
xorq %r13,%r13
mulxq %r11,%rbp,%r11
movq L$poly+24(%rip),%r15
adcq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
adcq %rbp,%r10
shlxq %r14,%r8,%rbp
adcq %rcx,%r11
shrxq %r14,%r8,%rcx
adcq $0,%r12
addq %rbp,%r9
adcq %rcx,%r10
mulxq %r15,%rcx,%rbp
movq 8(%rbx),%rdx
adcq %rcx,%r11
adcq %rbp,%r12
adcq $0,%r13
xorq %r8,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
adcxq %rcx,%r12
shlxq %r14,%r9,%rcx
adoxq %rbp,%r13
shrxq %r14,%r9,%rbp
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
addq %rcx,%r10
adcq %rbp,%r11
mulxq %r15,%rcx,%rbp
movq 16(%rbx),%rdx
adcq %rcx,%r12
adcq %rbp,%r13
adcq $0,%r8
xorq %r9,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
adcxq %rcx,%r13
shlxq %r14,%r10,%rcx
adoxq %rbp,%r8
shrxq %r14,%r10,%rbp
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
addq %rcx,%r11
adcq %rbp,%r12
mulxq %r15,%rcx,%rbp
movq 24(%rbx),%rdx
adcq %rcx,%r13
adcq %rbp,%r8
adcq $0,%r9
xorq %r10,%r10
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
adcxq %rcx,%r8
shlxq %r14,%r11,%rcx
adoxq %rbp,%r9
shrxq %r14,%r11,%rbp
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
addq %rcx,%r12
adcq %rbp,%r13
mulxq %r15,%rcx,%rbp
movq %r12,%rbx
movq L$poly+8(%rip),%r14
adcq %rcx,%r8
movq %r13,%rdx
adcq %rbp,%r9
adcq $0,%r10
xorl %eax,%eax
movq %r8,%rcx
sbbq $-1,%r12
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rbp
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %rbp,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_sqr_montx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
xorl %eax,%eax
adcq %rcx,%r10
mulxq %r8,%rbp,%r12
movq %r14,%rdx
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq 0+128(%rsi),%rdx
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
movq 8+128(%rsi),%rdx
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
movq 16+128(%rsi),%rdx
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
.byte 0x67
mulxq %rdx,%rcx,%rbp
movq 24+128(%rsi),%rdx
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
movq $32,%rsi
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %rdx,%rcx,%rax
movq L$poly+24(%rip),%rdx
adoxq %rcx,%r14
shlxq %rsi,%r8,%rcx
adoxq %rax,%r15
shrxq %rsi,%r8,%rax
movq %rdx,%rbp
addq %rcx,%r9
adcq %rax,%r10
mulxq %r8,%rcx,%r8
adcq %rcx,%r11
shlxq %rsi,%r9,%rcx
adcq $0,%r8
shrxq %rsi,%r9,%rax
addq %rcx,%r10
adcq %rax,%r11
mulxq %r9,%rcx,%r9
adcq %rcx,%r8
shlxq %rsi,%r10,%rcx
adcq $0,%r9
shrxq %rsi,%r10,%rax
addq %rcx,%r11
adcq %rax,%r8
mulxq %r10,%rcx,%r10
adcq %rcx,%r9
shlxq %rsi,%r11,%rcx
adcq $0,%r10
shrxq %rsi,%r11,%rax
addq %rcx,%r8
adcq %rax,%r9
mulxq %r11,%rcx,%r11
adcq %rcx,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r8,%r12
movq L$poly+8(%rip),%rsi
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %r11,%r15
movq %r13,%r9
adcq $0,%rdx
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%r11
sbbq %rbp,%r15
sbbq $0,%rdx
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %r11,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.globl _ecp_nistz256_select_w5
.private_extern _ecp_nistz256_select_w5
.p2align 5
_ecp_nistz256_select_w5:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rax
movq 8(%rax),%rax
testl $32,%eax
jnz L$avx2_select_w5
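# (note: constant-time lookup -- all 16 window entries are loaded,
# pcmpeqd turns an index match into an all-ones mask, and pand/por
# accumulate only the matching entry, so the memory access pattern
# does not depend on the secret index in %edx.)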
movdqa L$One(%rip),%xmm0
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movdqa %xmm0,%xmm8
pshufd $0,%xmm1,%xmm1
movq $16,%rax
L$select_loop_sse_w5:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
pcmpeqd %xmm1,%xmm15
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
movdqa 64(%rsi),%xmm13
movdqa 80(%rsi),%xmm14
leaq 96(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
pand %xmm15,%xmm13
por %xmm12,%xmm5
pand %xmm15,%xmm14
por %xmm13,%xmm6
por %xmm14,%xmm7
decq %rax
jnz L$select_loop_sse_w5
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
movdqu %xmm6,64(%rdi)
movdqu %xmm7,80(%rdi)
ret
L$SEH_end_ecp_nistz256_select_w5:
.globl _ecp_nistz256_select_w7
.private_extern _ecp_nistz256_select_w7
.p2align 5
_ecp_nistz256_select_w7:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rax
movq 8(%rax),%rax
testl $32,%eax
jnz L$avx2_select_w7
movdqa L$One(%rip),%xmm8
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa %xmm8,%xmm0
pshufd $0,%xmm1,%xmm1
movq $64,%rax
L$select_loop_sse_w7:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
pcmpeqd %xmm1,%xmm15
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
leaq 64(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
prefetcht0 255(%rsi)
por %xmm12,%xmm5
decq %rax
jnz L$select_loop_sse_w7
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
ret
L$SEH_end_ecp_nistz256_select_w7:
.p2align 5
ecp_nistz256_avx2_select_w5:
L$avx2_select_w5:
vzeroupper
vmovdqa L$Two(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vpxor %ymm4,%ymm4,%ymm4
vmovdqa L$One(%rip),%ymm5
vmovdqa L$Two(%rip),%ymm10
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $8,%rax
L$select_loop_avx2_w5:
vmovdqa 0(%rsi),%ymm6
vmovdqa 32(%rsi),%ymm7
vmovdqa 64(%rsi),%ymm8
vmovdqa 96(%rsi),%ymm11
vmovdqa 128(%rsi),%ymm12
vmovdqa 160(%rsi),%ymm13
vpcmpeqd %ymm1,%ymm5,%ymm9
vpcmpeqd %ymm1,%ymm10,%ymm14
vpaddd %ymm0,%ymm5,%ymm5
vpaddd %ymm0,%ymm10,%ymm10
leaq 192(%rsi),%rsi
vpand %ymm9,%ymm6,%ymm6
vpand %ymm9,%ymm7,%ymm7
vpand %ymm9,%ymm8,%ymm8
vpand %ymm14,%ymm11,%ymm11
vpand %ymm14,%ymm12,%ymm12
vpand %ymm14,%ymm13,%ymm13
vpxor %ymm6,%ymm2,%ymm2
vpxor %ymm7,%ymm3,%ymm3
vpxor %ymm8,%ymm4,%ymm4
vpxor %ymm11,%ymm2,%ymm2
vpxor %ymm12,%ymm3,%ymm3
vpxor %ymm13,%ymm4,%ymm4
decq %rax
jnz L$select_loop_avx2_w5
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,64(%rdi)
vzeroupper
ret
L$SEH_end_ecp_nistz256_avx2_select_w5:
.p2align 5
ecp_nistz256_avx2_select_w7:
L$avx2_select_w7:
_CET_ENDBR
vzeroupper
vmovdqa L$Three(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vmovdqa L$One(%rip),%ymm4
vmovdqa L$Two(%rip),%ymm8
vmovdqa L$Three(%rip),%ymm12
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $21,%rax
L$select_loop_avx2_w7:
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm9
vmovdqa 96(%rsi),%ymm10
vmovdqa 128(%rsi),%ymm13
vmovdqa 160(%rsi),%ymm14
vpcmpeqd %ymm1,%ymm4,%ymm7
vpcmpeqd %ymm1,%ymm8,%ymm11
vpcmpeqd %ymm1,%ymm12,%ymm15
vpaddd %ymm0,%ymm4,%ymm4
vpaddd %ymm0,%ymm8,%ymm8
vpaddd %ymm0,%ymm12,%ymm12
leaq 192(%rsi),%rsi
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpand %ymm11,%ymm9,%ymm9
vpand %ymm11,%ymm10,%ymm10
vpand %ymm15,%ymm13,%ymm13
vpand %ymm15,%ymm14,%ymm14
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vpxor %ymm9,%ymm2,%ymm2
vpxor %ymm10,%ymm3,%ymm3
vpxor %ymm13,%ymm2,%ymm2
vpxor %ymm14,%ymm3,%ymm3
decq %rax
jnz L$select_loop_avx2_w7
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vpcmpeqd %ymm1,%ymm4,%ymm7
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vzeroupper
ret
L$SEH_end_ecp_nistz256_avx2_select_w7:
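# (note on the helpers below: results are kept reduced without
# branches -- after an add or doubling, p is trial-subtracted
# (subq $-1 plus sbbq against the L$poly words the callers keep in
# %r14/%r15) and cmovc undoes it on borrow; after a subtraction,
# p is conditionally added back the same way.)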
.p2align 5
__ecp_nistz256_add_toq:
xorq %r11,%r11
addq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_sub_fromq:
subq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq %r11,%r11
addq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
testq %r11,%r11
cmovzq %rax,%r12
cmovzq %rbp,%r13
movq %r12,0(%rdi)
cmovzq %rcx,%r8
movq %r13,8(%rdi)
cmovzq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_subq:
subq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq %r11,%r11
addq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
testq %r11,%r11
cmovnzq %rax,%r12
cmovnzq %rbp,%r13
cmovnzq %rcx,%r8
cmovnzq %r10,%r9
ret
.p2align 5
__ecp_nistz256_mul_by_2q:
xorq %r11,%r11
addq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.globl _ecp_nistz256_point_double
.private_extern _ecp_nistz256_point_double
.p2align 5
_ecp_nistz256_point_double:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rcx
movq 8(%rcx),%rcx
andl $0x80100,%ecx
cmpl $0x80100,%ecx
je L$point_doublex
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $160+8,%rsp
L$point_doubleq_body:
L$point_double_shortcutq:
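# (note: ecp_nistz256_point_add branches back in at this shortcut
# when both of its inputs are the same point, after trimming its
# own stack frame down to the doubling layout.)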
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq L$poly+8(%rip),%r14
movq L$poly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-0(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 32(%rbx),%rax
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-0(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montq
call __ecp_nistz256_mul_by_2q
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montq
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rax
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 0+32(%rsp),%rax
movq 8+32(%rsp),%r14
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montq
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subq
movq 32(%rsp),%rax
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-0(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromq
leaq 160+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_doubleq_epilogue:
ret
.globl _ecp_nistz256_point_add
.private_extern _ecp_nistz256_point_add
.p2align 5
_ecp_nistz256_point_add:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rcx
movq 8(%rcx),%rcx
andl $0x80100,%ecx
cmpl $0x80100,%ecx
je L$point_addx
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576+8,%rsp
L$point_addq_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
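# Infinity detection (added note): %xmm5 is OR-reduced from in1's Z below
# and compared against zero, yielding an all-ones mask when in1 is the
# point at infinity; %xmm4 is built the same way for in2. The two masks
# drive the constant-time selection of the final result.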
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-0(%rsi),%rsi
movq %rax,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rax
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-0(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 416(%rsp),%rax
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 512(%rsp),%rax
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq 0+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 480(%rsp),%rax
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e
jnz L$add_proceedq
testq %r9,%r9
jz L$add_doubleq
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp L$add_doneq
.p2align 5
L$add_doubleq:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
jmp L$point_double_shortcutq
.p2align 5
L$add_proceedq:
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq 0+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0(%rsp),%rax
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 160(%rsp),%rax
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
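# Double modulo p-256 in-line (added note): 2*x with the carry caught in
# %r11, then subtract p; the cmovc instructions restore the
# pre-subtraction value when the subtraction borrows, while the
# interleaved loads fetch the next operand for __ecp_nistz256_subq.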
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
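# Constant-time output selection (added note): for each coordinate the
# pandn/pand cascades pick the computed sum, or copy in2 when in1 was at
# infinity (%xmm5 mask), or copy in1 when in2 was (%xmm4 mask).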
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
L$add_doneq:
leaq 576+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_addq_epilogue:
ret
.globl _ecp_nistz256_point_add_affine
.private_extern _ecp_nistz256_point_add_affine
.p2align 5
_ecp_nistz256_point_add_affine:
_CET_ENDBR
leaq _OPENSSL_ia32cap_P(%rip),%rcx
movq 8(%rcx),%rcx
andl $0x80100,%ecx
cmpl $0x80100,%ecx
je L$point_add_affinex
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $480+8,%rsp
L$add_affineq_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-0(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rax
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-0(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+96(%rsp),%rax
movq 8+96(%rsp),%r14
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq 0+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rax
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq 0+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand L$ONE_mont(%rip),%xmm2
pand L$ONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$add_affineq_epilogue:
ret
.p2align 5
__ecp_nistz256_add_tox:
xorq %r11,%r11
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_sub_fromx:
xorq %r11,%r11
sbbq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq $0,%r11
xorq %r10,%r10
adcq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
btq $0,%r11
cmovncq %rax,%r12
cmovncq %rbp,%r13
movq %r12,0(%rdi)
cmovncq %rcx,%r8
movq %r13,8(%rdi)
cmovncq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
__ecp_nistz256_subx:
xorq %r11,%r11
sbbq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq $0,%r11
xorq %r9,%r9
adcq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
btq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
cmovcq %rcx,%r8
cmovcq %r10,%r9
ret
.p2align 5
__ecp_nistz256_mul_by_2x:
xorq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.p2align 5
ecp_nistz256_point_doublex:
L$point_doublex:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $160+8,%rsp
L$point_doublex_body:
L$point_double_shortcutx:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq L$poly+8(%rip),%r14
movq L$poly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-128(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 32(%rbx),%rdx
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-128(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montx
call __ecp_nistz256_mul_by_2x
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montx
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rdx
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 0+32(%rsp),%rdx
movq 8+32(%rsp),%r14
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montx
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subx
movq 32(%rsp),%rdx
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-128(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromx
leaq 160+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_doublex_epilogue:
ret
.p2align 5
ecp_nistz256_point_addx:
L$point_addx:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576+8,%rsp
L$point_addx_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-128(%rsi),%rsi
movq %rdx,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rdx
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-128(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 416(%rsp),%rdx
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 512(%rsp),%rdx
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq -128+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 480(%rsp),%rdx
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e
jnz L$add_proceedx
testq %r9,%r9
jz L$add_doublex
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp L$add_donex
.p2align 5
L$add_doublex:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
jmp L$point_double_shortcutx
.p2align 5
L$add_proceedx:
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq -128+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%rdx
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 160(%rsp),%rdx
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
L$add_donex:
leaq 576+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$point_addx_epilogue:
ret
.p2align 5
ecp_nistz256_point_add_affinex:
L$point_add_affinex:
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $480+8,%rsp
L$add_affinex_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-128(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rdx
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-128(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+96(%rsp),%rdx
movq 8+96(%rsp),%r14
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq -128+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rdx
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq -128+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand L$ONE_mont(%rip),%xmm2
pand L$ONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbx
movq -8(%rsi),%rbp
leaq (%rsi),%rsp
L$add_affinex_epilogue:
ret
#endif
// .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/vpaes-armv7-linux32.S (chairq/First-choice, 23,634 bytes)
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
.syntax unified
.arch armv7-a
.fpu neon
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
.text
.type _vpaes_consts,%object
.align 7 @ totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:@ mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:@ mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:@ sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
@
@ "Hot" constants
@
.Lk_inv:@ inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:@ input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:@ sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:@ sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:@ sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.size _vpaes_consts,.-_vpaes_consts
.align 6
@@
@@ _aes_preheat
@@
@@ Fills q9-q15 as specified below.
@@
.type _vpaes_preheat,%function
.align 4
_vpaes_preheat:
adr r10, .Lk_inv
vmov.i8 q9, #0x0f @ .Lk_s0F
vld1.64 {q10,q11}, [r10]! @ .Lk_inv
add r10, r10, #64 @ Skip .Lk_ipt, .Lk_sbo
vld1.64 {q12,q13}, [r10]! @ .Lk_sb1
vld1.64 {q14,q15}, [r10] @ .Lk_sb2
bx lr
@@
@@ _aes_encrypt_core
@@
@@ AES-encrypt q0.
@@
@@ Inputs:
@@ q0 = input
@@ q9-q15 as in _vpaes_preheat
@@ [r2] = scheduled keys
@@
@@ Output in q0
@@ Clobbers q1-q5, r8-r11
@@ Preserves q6-q8 so you get some local vectors
@@
@@
.type _vpaes_encrypt_core,%function
.align 4
_vpaes_encrypt_core:
mov r9, r2
ldr r8, [r2,#240] @ pull rounds
adr r11, .Lk_ipt
@ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
@ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
vld1.64 {q2, q3}, [r11]
adr r11, .Lk_mc_forward+16
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1
vtbl.8 d3, {q2}, d3
vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2
vtbl.8 d5, {q3}, d1
veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
	@ .Lenc_entry ends with a bne instruction which is normally paired with
	@ subs in .Lenc_loop. Set the flags here (r8 = rounds is nonzero, so Z
	@ is clear) so that first bne enters the loop.
	tst	r8, r8
b .Lenc_entry
.align 4
.Lenc_loop:
@ middle of middle round
add r10, r11, #0x40
vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
vtbl.8 d9, {q13}, d5
vld1.64 {q1}, [r11]! @ vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
vtbl.8 d1, {q12}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
vtbl.8 d11, {q15}, d5
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
vtbl.8 d5, {q14}, d7
vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
vtbl.8 d7, {q0}, d3
veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
@ Write to q5 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
vtbl.8 d11, {q0}, d9
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
vtbl.8 d9, {q3}, d3
@ Here we restore the original q0/q5 usage.
veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... mod 4
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
subs r8, r8, #1 @ nr--
.Lenc_entry:
@ top of round
vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
vtbl.8 d11, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
vtbl.8 d5, {q10}, d7
vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
vtbl.8 d7, {q10}, d9
veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5
bne .Lenc_loop
@ middle of last round
add r10, r11, #0x80
adr r11, .Lk_sbo
@ Read to q1 instead of q4, so the vtbl.8 instruction below does not
@ overlap table and destination registers.
vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou
vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
vtbl.8 d9, {q1}, d5
vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
@ Write to q2 instead of q0 below, to avoid overlapping table and
@ destination registers.
vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
vtbl.8 d5, {q0}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
@ Here we restore the original q0/q2 usage.
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0
vtbl.8 d1, {q2}, d3
bx lr
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,%function
.align 4
vpaes_encrypt:
@ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack
@ alignment.
stmdb sp!, {r7,r8,r9,r10,r11,lr}
@ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11}
vld1.64 {q0}, [r0]
bl _vpaes_preheat
bl _vpaes_encrypt_core
vst1.64 {q0}, [r1]
vldmia sp!, {d8,d9,d10,d11}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_encrypt,.-vpaes_encrypt
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@ @@
@@ AES key schedule @@
@@ @@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This function diverges from both x86_64 and aarch64 in which constants are
@ pinned. x86_64 has a common preheat function for all operations. aarch64
@ separates them because it has enough registers to pin nearly all constants.
@ armv7 does not have enough registers, but needing explicit loads and stores
@ also complicates using x86_64's register allocation directly.
@
@ We pin some constants for convenience and leave q14 and q15 free to load
@ others on demand.
@
@ Key schedule constants
@
.type _vpaes_key_consts,%object
.align 4
_vpaes_key_consts:
.Lk_rcon:@ rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_opt:@ output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:@ deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.size _vpaes_key_consts,.-_vpaes_key_consts
.type _vpaes_key_preheat,%function
.align 4
_vpaes_key_preheat:
adr r11, .Lk_rcon
vmov.i8 q12, #0x5b @ .Lk_s63
adr r10, .Lk_inv @ Must be aligned to 8 mod 16.
vmov.i8 q9, #0x0f @ .Lk_s0F
vld1.64 {q10,q11}, [r10] @ .Lk_inv
vld1.64 {q8}, [r11] @ .Lk_rcon
bx lr
.size _vpaes_key_preheat,.-_vpaes_key_preheat
.type _vpaes_schedule_core,%function
.align 4
_vpaes_schedule_core:
@ We only need to save lr, but ARM requires an 8-byte stack alignment,
@ so save an extra register.
stmdb sp!, {r3,lr}
bl _vpaes_key_preheat @ load the tables
adr r11, .Lk_ipt @ Must be aligned to 8 mod 16.
vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned)
@ input transform
@ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
@ overlap table and destination.
vmov q4, q0 @ vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
adr r10, .Lk_sr @ Must be aligned to 8 mod 16.
vmov q7, q0 @ vmovdqa %xmm0, %xmm7
add r8, r8, r10
@ encrypting, output zeroth round key after transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx)
@ *ring*: Decryption removed.
.Lschedule_go:
cmp r1, #192 @ cmp $192, %esi
bhi .Lschedule_256
	@ 128: fall through
@@
@@ .schedule_128
@@
@@ 128-bit specific part of key schedule.
@@
@@ This schedule is really simple, because all its parts
@@ are accomplished by the subroutines.
@@
.Lschedule_128:
mov r0, #10 @ mov $10, %esi
.Loop_schedule_128:
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq .Lschedule_mangle_last
bl _vpaes_schedule_mangle @ write output
b .Loop_schedule_128
@@
@@ .aes_schedule_256
@@
@@ 256-bit specific part of key schedule.
@@
@@ The structure here is very similar to the 128-bit
@@ schedule, but with an additional "low side" in
@@ q6. The low side's rounds are the same as the
@@ high side's, except no rcon and no rotation.
@@
.align 4
.Lschedule_256:
vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform @ input transform
mov r0, #7 @ mov $7, %esi
.Loop_schedule_256:
bl _vpaes_schedule_mangle @ output low result
vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
@ high round
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq .Lschedule_mangle_last
bl _vpaes_schedule_mangle
@ low round. swap xmm7 and xmm6
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vmov.i8 q4, #0
vmov q5, q7 @ vmovdqa %xmm7, %xmm5
vmov q7, q6 @ vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
vmov q7, q5 @ vmovdqa %xmm5, %xmm7
b .Loop_schedule_256
@@
@@ .aes_schedule_mangle_last
@@
@@ Mangler for last round of key schedule
@@ Mangles q0
@@ when encrypting, outputs out(q0) ^ 63
@@ when decrypting, outputs unskew(q0)
@@
@@ Always called right before return... jumps to cleanup and exits
@@
.align 4
.Lschedule_mangle_last:
@ schedule last round key from xmm0
adr r11, .Lk_deskew @ lea .Lk_deskew(%rip),%r11 # prepare to deskew
@ encrypting
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1
adr r11, .Lk_opt @ lea .Lk_opt(%rip), %r11 # prepare to output transform
add r2, r2, #32 @ add $32, %rdx
vmov q2, q0
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute
vtbl.8 d1, {q2}, d3
.Lschedule_mangle_last_dec:
sub r2, r2, #16 @ add $-16, %rdx
veor q0, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform @ output transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key
@ cleanup
veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0
veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2
veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3
veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4
veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5
veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6
veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7
ldmia sp!, {r3,pc} @ return
.size _vpaes_schedule_core,.-_vpaes_schedule_core
@@
@@ .aes_schedule_round
@@
@@ Runs one main round of the key schedule on q0, q7
@@
@@ Specifically, runs subbytes on the high dword of q0
@@ then rotates it by one byte and xors into the low dword of
@@ q7.
@@
@@ Adds rcon from low byte of q8, then rotates q8 for
@@ next rcon.
@@
@@ Smears the dwords of q7 by xoring the low into the
@@ second low, result into third, result into highest.
@@
@@ Returns results in q7 = q0.
@@ Clobbers q1-q4, r11.
@@
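@@ As a rough model in plain Python (an illustrative sketch, not part of
@@ the generated output: it follows the textbook AES-128 expansion and
@@ ignores vpaes's change of basis and the 0x63 constant folded in
@@ elsewhere; SBOX is assumed to be the standard AES S-box table):
@@
@@  def schedule_round(prev, rcon):
@@      t = prev[12:16]                  # last word of the previous key
@@      t = t[1:] + t[:1]                # RotWord: rotate by one byte
@@      t = bytes(SBOX[b] for b in t)    # SubWord: S-box every byte
@@      t = bytes([t[0] ^ rcon]) + t[1:] # fold in this round's rcon
@@      out = b""
@@      for i in range(4):               # "smear": running XOR of words
@@          t = bytes(a ^ b for a, b in zip(t, prev[4*i:4*i+4]))
@@          out += t
@@      return out
@@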
.type _vpaes_schedule_round,%function
.align 4
_vpaes_schedule_round:
@ extract rcon from xmm8
vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4
vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1
vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
@ rotate
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0
@ fall through...
@ low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
@ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
@ We pin other values in _vpaes_key_preheat, so load them now.
adr r11, .Lk_sb1
vld1.64 {q14,q15}, [r11]
@ smear xmm7
vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4
@ subbytes
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7
vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
vtbl.8 d5, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q7, q7, q12 @ vpxor .Lk_s63(%rip), %xmm7, %xmm7
vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
vtbl.8 d7, {q10}, d7
veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
vtbl.8 d5, {q10}, d9
veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io
veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
vtbl.8 d9, {q15}, d7
vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
vtbl.8 d3, {q14}, d5
veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
@ add in smeared stuff
veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0
veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7
bx lr
.size _vpaes_schedule_round,.-_vpaes_schedule_round
@@
@@ .aes_schedule_transform
@@
@@ Linear-transform q0 according to tables at [r11]
@@
@@ Requires that q9 = 0x0F0F... as in preheat
@@ Output in q0
@@ Clobbers q1, q2, q14, q15
@@
.type _vpaes_schedule_transform,%function
.align 4
_vpaes_schedule_transform:
vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo
@ vmovdqa 16(%r11), %xmm1 # hi
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2
vtbl.8 d5, {q14}, d3
vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0
vtbl.8 d1, {q15}, d1
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
bx lr
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
@@
@@ .aes_schedule_mangle
@@
@@ Mangles q0 from (basis-transformed) standard version
@@ to our version.
@@
@@ On encrypt,
@@ xor with 0x63
@@ multiply by circulant 0,1,1,1
@@ apply shiftrows transform
@@
@@ On decrypt,
@@ xor with 0x63
@@ multiply by "inverse mixcolumns" circulant E,B,D,9
@@ deskew
@@ apply shiftrows transform
@@
@@
@@ Writes out to [r2], and increments or decrements it
@@ Keeps track of round number mod 4 in r8
@@ Preserves q0
@@ Clobbers q1-q5
@@
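@@ The "multiply by circulant 0,1,1,1" step has a compact model in plain
@@ Python (an illustrative sketch, not part of the generated output).
@@ With mc_forward the per-word byte rotation from .Lk_mc_forward, the
@@ encrypt path below computes P(x) ^ P^2(x) ^ P^3(x), where x already
@@ has the basis-transformed 0x63 constant (0x5b, in q12) XORed in:
@@
@@  def mc_forward(b):            # rotate bytes within each 32-bit word
@@      return bytes(b[(i & ~3) | ((i + 1) & 3)] for i in range(16))
@@  def mangle_circulant(x):
@@      p1 = mc_forward(x)
@@      p2 = mc_forward(p1)
@@      p3 = mc_forward(p2)
@@      return bytes(a ^ b ^ c for a, b, c in zip(p1, p2, p3))
@@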
.type _vpaes_schedule_mangle,%function
.align 4
_vpaes_schedule_mangle:
tst r3, r3
vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later
adr r11, .Lk_mc_forward @ Must be aligned to 8 mod 16.
vld1.64 {q5}, [r11] @ vmovdqa .Lk_mc_forward(%rip),%xmm5
@ encrypting
@ Write to q2 so we do not overlap table and destination below.
veor q2, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm4
add r2, r2, #16 @ add $16, %rdx
vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4
vtbl.8 d9, {q2}, d11
vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1
vtbl.8 d3, {q4}, d11
vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3
vtbl.8 d7, {q1}, d11
veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3
.Lschedule_mangle_both:
@ Write to q2 so table and destination do not overlap.
vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d5, {q3}, d3
add r8, r8, #64-16 @ add $-16, %r8
and r8, r8, #~(1<<6) @ and $0x30, %r8
vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx)
bx lr
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,%function
.align 4
vpaes_set_encrypt_key:
stmdb sp!, {r7,r8,r9,r10,r11, lr}
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
lsr r9, r1, #5 @ shr $5,%eax
	add	r9, r9, #5		@ add	$5,%eax
str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov r3, #0 @ mov $0,%ecx
mov r8, #0x30 @ mov $0x30,%r8d
bl _vpaes_schedule_core
eor r0, r0, r0
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
@ Additional constants for converting to bsaes.
.type _vpaes_convert_consts,%object
.align 4
_vpaes_convert_consts:
@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
@ transform in the AES S-box. 0x63 is incorporated into the low half of the
@ table. This was computed with the following script:
@
@ def u64s_to_u128(x, y):
@ return x | (y << 64)
@ def u128_to_u64s(w):
@ return w & ((1<<64)-1), w >> 64
@ def get_byte(w, i):
@ return (w >> (i*8)) & 0xff
@ def apply_table(table, b):
@ lo = b & 0xf
@ hi = b >> 4
@ return get_byte(table[0], lo) ^ get_byte(table[1], hi)
@ def opt(b):
@ table = [
@ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
@ ]
@ return apply_table(table, b)
@ def rot_byte(b, n):
@ return 0xff & ((b << n) | (b >> (8-n)))
@ def skew(x):
@ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
@ rot_byte(x, 4))
@ table = [0, 0]
@ for i in range(16):
@ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
@ table[1] |= skew(opt(i<<4)) << (i*8)
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0]))
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1]))
.Lk_opt_then_skew:
.quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b
.quad 0x1f30062936192f00, 0xb49bad829db284ab
@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl vpaes_encrypt_key_to_bsaes
.hidden vpaes_encrypt_key_to_bsaes
.type vpaes_encrypt_key_to_bsaes,%function
.align 4
vpaes_encrypt_key_to_bsaes:
stmdb sp!, {r11, lr}
@ See _vpaes_schedule_core for the key schedule logic. In particular,
@ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
@ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
@ contain the transformations not in the bsaes representation. This
@ function inverts those transforms.
@
@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
@ representation, which does not match the other aes_nohw_*
@ implementations. The ARM aes_nohw_* stores each 32-bit word
@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
@ cost of extra REV and VREV32 operations in little-endian ARM.
vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform
adr r2, .Lk_mc_forward @ Must be aligned to 8 mod 16.
	add	r3, r2, #0x90 @ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)
vld1.64 {q12}, [r2]
vmov.i8 q10, #0x5b @ .Lk_s63 from vpaes-x86_64
adr r11, .Lk_opt @ Must be aligned to 8 mod 16.
	vmov.i8	q11, #0x63		@ .Lk_s63 without .Lk_ipt applied
@ vpaes stores one fewer round count than bsaes, but the number of keys
@ is the same.
ldr r2, [r1,#240]
add r2, r2, #1
str r2, [r0,#240]
@ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
@ Invert this with .Lk_opt.
vld1.64 {q0}, [r1]!
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
@ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
@ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
.Loop_enc_key_to_bsaes:
vld1.64 {q0}, [r1]!
@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
@ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
@ We use r3 rather than r8 to avoid a callee-saved register.
vld1.64 {q1}, [r3]
vtbl.8 d4, {q0}, d2
vtbl.8 d5, {q0}, d3
add r3, r3, #16
and r3, r3, #~(1<<6)
vmov q0, q2
@ Handle the last key differently.
subs r2, r2, #1
beq .Loop_enc_key_to_bsaes_last
@ Multiply by the circulant. This is its own inverse.
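	@ (Why it is self-inverse: with P the per-word byte rotation, P^4 = 1,
	@ and over GF(2) the cross terms of (P + P^2 + P^3)^2 cancel, leaving
	@ P^2 + P^4 + P^6 = P^2 + 1 + P^2 = 1.)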
vtbl.8 d2, {q0}, d24
vtbl.8 d3, {q0}, d25
vmov q0, q1
vtbl.8 d4, {q1}, d24
vtbl.8 d5, {q1}, d25
veor q0, q0, q2
vtbl.8 d2, {q2}, d24
vtbl.8 d3, {q2}, d25
veor q0, q0, q1
@ XOR and finish.
veor q0, q0, q10
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
b .Loop_enc_key_to_bsaes
.Loop_enc_key_to_bsaes_last:
@ The final key does not have a basis transform (note
@ .Lschedule_mangle_last inverts the original transform). It only XORs
@ 0x63 and applies ShiftRows. The latter was already inverted in the
@ loop. Note that, because we act on the original representation, we use
@ q11, not q10.
veor q0, q0, q11
vrev32.8 q0, q0
vst1.64 {q0}, [r0]
@ Wipe registers which contained key material.
veor q0, q0, q0
veor q1, q1, q1
veor q2, q2, q2
ldmia sp!, {r11, pc} @ return
.size vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,%function
.align 4
vpaes_ctr32_encrypt_blocks:
mov ip, sp
stmdb sp!, {r7,r8,r9,r10,r11, lr}
@ This function uses q4-q7 (d8-d15), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
cmp r2, #0
@ r8 is passed on the stack.
ldr r8, [ip]
beq .Lctr32_done
@ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
mov r9, r3
mov r3, r2
mov r2, r9
@ Load the IV and counter portion.
ldr r7, [r8, #12]
vld1.8 {q7}, [r8]
bl _vpaes_preheat
rev r7, r7 @ The counter is big-endian.
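	@ Only the IV's last 32-bit word acts as the block counter (CTR32
	@ semantics, as used for GCM); the leading 96 bits are never touched.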
.Lctr32_loop:
vmov q0, q7
vld1.8 {q6}, [r0]! @ .Load input ahead of time
bl _vpaes_encrypt_core
veor q0, q0, q6 @ XOR input and result
vst1.8 {q0}, [r1]!
subs r3, r3, #1
@ Update the counter.
add r7, r7, #1
rev r9, r7
vmov.32 d15[1], r9
bne .Lctr32_loop
.Lctr32_done:
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
// .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/aesv8-armx-ios64.S (chairq/First-choice, 8,262 bytes)
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include <ring-core/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.section __TEXT,__const
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl _aes_hw_set_encrypt_key
.private_extern _aes_hw_set_encrypt_key
.align 5
_aes_hw_set_encrypt_key:
Lenc_key:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-1
cmp x0,#0
b.eq Lenc_key_abort
cmp x2,#0
b.eq Lenc_key_abort
mov x3,#-2
cmp w1,#128
b.lt Lenc_key_abort
cmp w1,#256
b.gt Lenc_key_abort
tst w1,#0x3f
b.ne Lenc_key_abort
adrp x3,Lrcon@PAGE
add x3,x3,Lrcon@PAGEOFF
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt Loop128
// 192-bit key support was removed.
b L256
.align 4
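// Each pass below derives one round key (added note). tbl with the
// rotate-n-splat index vector broadcasts RotWord(w3) into all four words,
// and aese against the all-zero v0 then degenerates to pure SubBytes:
// ShiftRows only permutes bytes across columns, and all four columns are
// equal. The ext/eor chain turns (w0,w1,w2,w3) into the running XOR
// (w0, w0^w1, w0^w1^w2, w0^w1^w2^w3), so one final eor of
// SubWord(RotWord(w3))^rcon completes the round key.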
Loop128:
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b Ldone
// 192-bit key support was removed.
.align 4
L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b Loop256
Ldone:
str w12,[x2]
mov x3,#0
Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.globl _aes_hw_encrypt
.private_extern _aes_hw_encrypt
.align 5
_aes_hw_encrypt:
AARCH64_VALID_CALL_TARGET
ldr w3,[x2,#240]
ld1 {v0.4s},[x2],#16
ld1 {v2.16b},[x0]
sub w3,w3,#2
ld1 {v1.4s},[x2],#16
Loop_enc:
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2],#16
subs w3,w3,#2
aese v2.16b,v1.16b
aesmc v2.16b,v2.16b
ld1 {v1.4s},[x2],#16
b.gt Loop_enc
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2]
aese v2.16b,v1.16b
eor v2.16b,v2.16b,v0.16b
st1 {v2.16b},[x1]
ret
.globl _aes_hw_ctr32_encrypt_blocks
.private_extern _aes_hw_ctr32_encrypt_blocks
.align 5
_aes_hw_ctr32_encrypt_blocks:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240]
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The mov-to-lane
// instructions could write to v1.16b and v18.16b directly, but that
// trips these bugs. We write to v6.16b and copy to the final register
// as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
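// A sketch of the pattern avoided versus the one used below:
//
//   mov  v1.s[3], w10            // direct lane write: can trip the errata
//
//   mov  v6.s[3], w10            // workaround: stage the lane write,
//   orr  v1.16b, v6.16b, v6.16b  // then copy the whole register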
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b Loop3x_ctr32
.align 4
Loop3x_ctr32:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs Loop3x_ctr32
adds x2,x2,#3
b.eq Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq
Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq Lctr32_done
st1 {v3.16b},[x1]
Lctr32_done:
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/p256-armv8-asm-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#include "ring-core/arm_arch.h"
.section .rodata
.align 5
.Lpoly:
.quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
.LRR: // 2^512 mod P precomputed for NIST P256 polynomial
.quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
.Lone_mont:
.quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
.Lone:
.quad 1,0,0,0
.Lord:
.quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
.LordK:
.quad 0xccd1c8aaee00bc4f
.byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
// void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
// const BN_ULONG x2[4]);
.globl ecp_nistz256_mul_mont
.hidden ecp_nistz256_mul_mont
.type ecp_nistz256_mul_mont,%function
.align 4
ecp_nistz256_mul_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_mul_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
// void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_sqr_mont
.hidden ecp_nistz256_sqr_mont
.type ecp_nistz256_sqr_mont,%function
.align 4
ecp_nistz256_sqr_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sqr_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
// void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_neg
.hidden ecp_nistz256_neg
.type ecp_nistz256_neg,%function
.align 4
ecp_nistz256_neg:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x2,x1
mov x14,xzr // a = 0
mov x15,xzr
mov x16,xzr
mov x17,xzr
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sub_from
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_neg,.-ecp_nistz256_neg
// note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
// to x4-x7 and b[0] to x3
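// A sketch of the "*0xffff0001" reduction steps: for
// p = 2^256 - 2^224 + 2^192 + 2^96 - 1 we have -p^{-1} = 1 mod 2^64, so
// each Montgomery step simply adds
//
//   acc[0]*p = acc[0]*2^256 - acc[0]*2^224 + acc[0]*2^192
//            + acc[0]*2^96 - acc[0]
//
// which zeroes the low limb (the "omit acc[0]" part) and is built from
// nothing more than acc[0]<<32 and acc[0]>>32.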
.type __ecp_nistz256_mul_mont,%function
.align 4
__ecp_nistz256_mul_mont:
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x11,x7,x3
ldr x3,[x2,#8] // b[1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adc x19,xzr,x11
mov x20,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(1+1)] // b[1+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(2+1)] // b[2+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
// last reduction
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adcs x17,x19,x11
adc x19,x20,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
// note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
// to x4-x7
.type __ecp_nistz256_sqr_mont,%function
.align 4
__ecp_nistz256_sqr_mont:
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where each Ax is a 64-bit limb of the
// accumulator.
//
// The "can't overflow" notes below mark carries into the high part of a
// multiplication result, which can't overflow because it can never be
// all ones.
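// In formula form, with a = a[3]*2^192 + a[2]*2^128 + a[1]*2^64 + a[0]:
//
//   a^2 = sum_i a[i]^2 * 2^(128*i) + 2 * sum_{i<j} a[i]*a[j] * 2^(64*(i+j))
//
// so the cross products are accumulated first, doubled ("acc[1-6]*=2"),
// and the squares a[i]^2 are added in afterwards.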
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x2,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
lsl x8,x14,#32
adcs x1,x1,x11
lsr x9,x14,#32
adc x2,x2,x7
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adc x17,x11,xzr // can't overflow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x2
adc x19,xzr,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
// Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to
// x14-x17 and x8-x11. This is done because it's used in multiple
// contexts, e.g. in multiplication by 2 and 3...
.type __ecp_nistz256_add_to,%function
.align 4
__ecp_nistz256_add_to:
adds x14,x14,x8 // ret = a+b
adcs x15,x15,x9
adcs x16,x16,x10
adcs x17,x17,x11
adc x1,xzr,xzr // zap x1
adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x1,xzr // did subtraction borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_add_to,.-__ecp_nistz256_add_to
.type __ecp_nistz256_sub_from,%function
.align 4
__ecp_nistz256_sub_from:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x14,x8 // ret = a-b
sbcs x15,x15,x9
sbcs x16,x16,x10
sbcs x17,x17,x11
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
.type __ecp_nistz256_sub_morf,%function
.align 4
__ecp_nistz256_sub_morf:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x8,x14 // ret = b-a
sbcs x15,x9,x15
sbcs x16,x10,x16
sbcs x17,x11,x17
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
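// __ecp_nistz256_div_by_2 computes a/2 mod p. Because p is odd, exactly
// one of a and a+p is even, so the code conditionally adds p (keeping the
// carry-out in x1 as a 257th bit) and shifts the 257-bit result right by
// one.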
.type __ecp_nistz256_div_by_2,%function
.align 4
__ecp_nistz256_div_by_2:
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adcs x11,x17,x13
adc x1,xzr,xzr // zap x1
tst x14,#1 // is a even?
csel x14,x14,x8,eq // ret = even ? a : a+modulus
csel x15,x15,x9,eq
csel x16,x16,x10,eq
csel x17,x17,x11,eq
csel x1,xzr,x1,eq
lsr x14,x14,#1 // ret >>= 1
orr x14,x14,x15,lsl#63
lsr x15,x15,#1
orr x15,x15,x16,lsl#63
lsr x16,x16,#1
orr x16,x16,x17,lsl#63
lsr x17,x17,#1
stp x14,x15,[x0]
orr x17,x17,x1,lsl#63
stp x16,x17,[x0,#16]
ret
.size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
.globl ecp_nistz256_point_double
.hidden ecp_nistz256_point_double
.type ecp_nistz256_point_double,%function
.align 5
ecp_nistz256_point_double:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
sub sp,sp,#32*4
.Ldouble_shortcut:
ldp x14,x15,[x1,#32]
mov x21,x0
ldp x16,x17,[x1,#48]
mov x22,x1
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
mov x8,x14
ldr x13,[x13,#24]
mov x9,x15
ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[x22,#64+16]
add x0,sp,#0
bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y);
add x0,sp,#64
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
ldp x8,x9,[x22]
ldp x10,x11,[x22,#16]
mov x4,x14 // put Zsqr aside for p256_sub
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x);
add x2,x22,#0
mov x14,x4 // restore Zsqr
mov x15,x5
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x16,x6
mov x17,x7
ldp x6,x7,[sp,#0+16]
add x0,sp,#64
bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
add x0,sp,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
ldr x3,[x22,#32]
ldp x4,x5,[x22,#64]
ldp x6,x7,[x22,#64+16]
add x2,x22,#32
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#0+16]
add x0,x21,#64
bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0);
add x0,sp,#96
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
ldr x3,[sp,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x0,x21,#32
bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
add x2,sp,#64
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
mov x8,x14 // duplicate M
mov x9,x15
mov x10,x16
mov x11,x17
mov x4,x14 // put M aside
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to
mov x8,x4 // restore M
mov x9,x5
ldr x3,[x22] // forward load for p256_mul_mont
mov x10,x6
ldp x4,x5,[sp,#0]
mov x11,x7
ldp x6,x7,[sp,#0+16]
bl __ecp_nistz256_add_to // p256_mul_by_3(M, M);
add x2,x22,#0
add x0,sp,#0
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#32+16]
add x0,sp,#96
bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S);
add x0,x21,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
add x2,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
add x2,sp,#0
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
ldr x3,[sp,#32]
mov x4,x14 // copy S
mov x5,x15
mov x6,x16
mov x7,x17
add x2,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
add x2,x21,#32
add x0,x21,#32
bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_point_double,.-ecp_nistz256_point_double
.globl ecp_nistz256_point_add
.hidden ecp_nistz256_point_add
.type ecp_nistz256_point_add,%function
.align 5
ecp_nistz256_point_add:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#32*12
ldp x4,x5,[x2,#64] // in2_z
ldp x6,x7,[x2,#64+16]
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
orr x8,x4,x5
orr x10,x6,x7
orr x25,x8,x10
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
ldp x4,x5,[x22,#64] // in1_z
ldp x6,x7,[x22,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x2,x23,#64
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x22,#64
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#32]
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x2,x22,#32
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#352]
ldp x6,x7,[sp,#352+16]
add x2,x23,#32
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,sp,#320
ldr x3,[sp,#192] // forward load for p256_mul_mont
ldp x4,x5,[x22]
ldp x6,x7,[x22,#16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x26,x14,x16 // ~is_equal(S1,S2)
add x2,sp,#192
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
ldr x3,[sp,#128]
ldp x4,x5,[x23]
ldp x6,x7,[x23,#16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
add x2,sp,#256
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x14,x14,x16 // ~is_equal(U1,U2)
mvn x27,x24 // -1/0 -> 0/-1
mvn x28,x25 // -1/0 -> 0/-1
orr x14,x14,x27
orr x14,x14,x28
orr x14,x14,x26
cbnz x14,.Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))
.Ladd_double:
mov x1,x22
mov x0,x21
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
add sp,sp,#256 // #256 is #32*(12-4), the difference between the two stack frames
b .Ldouble_shortcut
.align 4
.Ladd_proceed:
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#64]
ldp x6,x7,[sp,#64+16]
add x2,x23,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
ldr x3,[sp,#96]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,sp,#96
add x0,sp,#224
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[sp,#128]
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#128
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#192
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#224
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#288
ldr x3,[sp,#224] // forward load for p256_mul_mont
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,sp,#224
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#160
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#352
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
.Ladd_done:
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_point_add,.-ecp_nistz256_point_add
.globl ecp_nistz256_point_add_affine
.hidden ecp_nistz256_point_add_affine
.type ecp_nistz256_point_add_affine,%function
.align 5
ecp_nistz256_point_add_affine:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
sub sp,sp,#32*10
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,.Lpoly
add x13,x13,:lo12:.Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
ldp x4,x5,[x1,#64] // in1_z
ldp x6,x7,[x1,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
ldp x14,x15,[x2] // in2_x
ldp x16,x17,[x2,#16]
ldp x8,x9,[x2,#32] // in2_y
ldp x10,x11,[x2,#48]
orr x14,x14,x15
orr x16,x16,x17
orr x8,x8,x9
orr x10,x10,x11
orr x14,x14,x16
orr x8,x8,x10
orr x25,x14,x8
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
mov x4,x14
mov x5,x15
mov x6,x16
mov x7,x17
ldr x3,[x23]
add x2,x23,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
add x2,x22,#0
ldr x3,[x22,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
add x2,x22,#64
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#160]
ldp x6,x7,[sp,#160+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x23,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,x22,#32
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#192
bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
add x0,sp,#224
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x0,sp,#288
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,sp,#160
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[x22]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,x22,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#224
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#288
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#256
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#96
ldr x3,[x22,#32] // forward load for p256_mul_mont
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,x22,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
ldr x3,[sp,#192]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#192
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#128
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
adrp x23,.Lone_mont-64
add x23,x23,:lo12:.Lone_mont-64
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x29,x30,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
// uint64_t b[4]);
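// A sketch of the reduction below: with n the group order and
// k0 = .LordK = -n^{-1} mod 2^64, each step computes m = acc[0]*k0 and
// adds m*n, which zeroes the low limb so it can be dropped. m*n[0] and
// m*n[1] take real mul/umulh instructions, while the special-form upper
// limbs n[2] = 2^64-1 and n[3] = 2^64-2^32 are folded in with the
// lsl/lsr/subs shift chains.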
.globl ecp_nistz256_ord_mul_mont
.hidden ecp_nistz256_ord_mul_mont
.type ecp_nistz256_ord_mul_mont,%function
.align 4
ecp_nistz256_ord_mul_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,.Lord
add x23,x23,:lo12:.Lord
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x19,x7,x3
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts of multiplication
adcs x16,x16,x9
adcs x17,x17,x10
adc x19,x19,xzr
mov x20,xzr
ldr x3,[x2,#8*1] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*2] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*3] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
lsl x8,x24,#32 // last reduction
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
.size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
// uint64_t rep);
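// One Montgomery squaring maps a*R to a^2*R (R = 2^256), so rep trips
// around .Loop_ord_sqr below map a*R to a^(2^rep)*R, the kind of
// repeated-squaring run an inversion addition chain needs.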
.globl ecp_nistz256_ord_sqr_mont
.hidden ecp_nistz256_ord_sqr_mont
.type ecp_nistz256_ord_sqr_mont,%function
.align 4
ecp_nistz256_ord_sqr_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,.Lord
add x23,x23,:lo12:.Lord
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
b .Loop_ord_sqr
.align 4
.Loop_ord_sqr:
sub x2,x2,#1
////////////////////////////////////////////////////////////////
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where each Ax is a 64-bit limb of the
// accumulator.
//
// The "can't overflow" notes below mark carries into the high part of a
// multiplication result, which can't overflow because it can never be
// all ones.
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x3,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
mul x24,x14,x23
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
adcs x1,x1,x11
adc x3,x3,x7
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
mul x24,x14,x23
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x3
adc x19,xzr,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x5,x15,x9,lo
csel x6,x16,x10,lo
csel x7,x17,x11,lo
cbnz x2,.Loop_ord_sqr
stp x4,x5,[x0]
stp x6,x7,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
.size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index);
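// The lookup below is constant time: all 16 table entries are read, and a
// mask picks out the one whose position matches `index`. A rough C
// equivalent (names invented for illustration):
//
//   uint64_t acc[12] = {0};                            // [v16-v21]
//   for (int i = 1; i <= 16; i++, in_t += 12) {
//     uint64_t mask = (i == index) ? ~0ULL : 0;        // cmp + csetm
//     for (int j = 0; j < 12; j++)                     // bit v16..v21
//       acc[j] = (acc[j] & ~mask) | (in_t[j] & mask);
//   }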
.globl ecp_nistz256_select_w5
.hidden ecp_nistz256_select_w5
.type ecp_nistz256_select_w5,%function
.align 4
ecp_nistz256_select_w5:
AARCH64_VALID_CALL_TARGET
// x10 := x0
// w9 := 0; loop counter and incremented internal index
mov x10, x0
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
movi v20.16b, #0
movi v21.16b, #0
.Lselect_w5_loop:
// Loop 16 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// continue loading ...
ld1 {v26.2d, v27.2d}, [x1],#32
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
bit v20.16b, v26.16b, v3.16b
bit v21.16b, v27.16b, v3.16b
// If bit #4 is 0 (i.e. idx_ctr < 16) loop back
tbz w9, #4, .Lselect_w5_loop
// Write [v16-v21] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64
st1 {v20.2d, v21.2d}, [x10]
ret
.size ecp_nistz256_select_w5,.-ecp_nistz256_select_w5
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index);
.globl ecp_nistz256_select_w7
.hidden ecp_nistz256_select_w7
.type ecp_nistz256_select_w7,%function
.align 4
ecp_nistz256_select_w7:
AARCH64_VALID_CALL_TARGET
// w9 := 0; loop counter and incremented internal index
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
.Lselect_w7_loop:
// Loop 64 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
// If bit #6 is 0 (i.e. idx_ctr < 64) loop back
tbz w9, #6, .Lselect_w7_loop
// Write [v16-v19] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0]
ret
.size ecp_nistz256_select_w7,.-ecp_nistz256_select_w7
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/vpaes-x86-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
.align 64
.L_vpaes_consts:
.long 218628480,235210255,168496130,67568393
.long 252381056,17041926,33884169,51187212
.long 252645135,252645135,252645135,252645135
.long 1512730624,3266504856,1377990664,3401244816
.long 830229760,1275146365,2969422977,3447763452
.long 3411033600,2979783055,338359620,2782886510
.long 4209124096,907596821,221174255,1006095553
.long 191964160,3799684038,3164090317,1589111125
.long 182528256,1777043520,2877432650,3265356744
.long 1874708224,3503451415,3305285752,363511674
.long 1606117888,3487855781,1093350906,2384367825
.long 197121,67569157,134941193,202313229
.long 67569157,134941193,202313229,197121
.long 134941193,202313229,197121,67569157
.long 202313229,197121,67569157,134941193
.long 33619971,100992007,168364043,235736079
.long 235736079,33619971,100992007,168364043
.long 168364043,235736079,33619971,100992007
.long 100992007,168364043,235736079,33619971
.long 50462976,117835012,185207048,252579084
.long 252314880,51251460,117574920,184942860
.long 184682752,252054788,50987272,118359308
.long 118099200,185467140,251790600,50727180
.long 2946363062,528716217,1300004225,1881839624
.long 1532713819,1532713819,1532713819,1532713819
.long 3602276352,4288629033,3737020424,4153884961
.long 1354558464,32357713,2958822624,3775749553
.long 1201988352,132424512,1572796698,503232858
.long 2213177600,1597421020,4103937655,675398315
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
.byte 118,101,114,115,105,116,121,41,0
.align 64
.hidden _vpaes_preheat
.type _vpaes_preheat,@function
.align 16
_vpaes_preheat:
addl (%esp),%ebp
movdqa -48(%ebp),%xmm7
movdqa -16(%ebp),%xmm6
ret
.size _vpaes_preheat,.-_vpaes_preheat
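// A note on the .byte runs below: they are SSSE3 instructions emitted as
// raw encodings so that very old assemblers can still build this file,
// for example:
//   .byte 102,15,56,0,208     // pshufb %xmm0,%xmm2
//   .byte 102,15,58,15,202,15 // palignr $15,%xmm2,%xmm1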
.hidden _vpaes_encrypt_core
.type _vpaes_encrypt_core,@function
.align 16
_vpaes_encrypt_core:
movl $16,%ecx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa (%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
movdqu (%edx),%xmm5
.byte 102,15,56,0,208
movdqa 16(%ebp),%xmm0
pxor %xmm5,%xmm2
psrld $4,%xmm1
addl $16,%edx
.byte 102,15,56,0,193
leal 192(%ebp),%ebx
pxor %xmm2,%xmm0
jmp .L000enc_entry
.align 16
.L001enc_loop:
movdqa 32(%ebp),%xmm4
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa 64(%ebp),%xmm5
pxor %xmm4,%xmm0
movdqa -64(%ebx,%ecx,1),%xmm1
.byte 102,15,56,0,234
movdqa 80(%ebp),%xmm2
movdqa (%ebx,%ecx,1),%xmm4
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addl $16,%edx
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addl $16,%ecx
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andl $48,%ecx
subl $1,%eax
pxor %xmm3,%xmm0
.L000enc_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm6,%xmm0
.byte 102,15,56,0,232
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm7,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm5
pxor %xmm1,%xmm3
jnz .L001enc_loop
movdqa 96(%ebp),%xmm4
movdqa 112(%ebp),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%ebx,%ecx,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.hidden _vpaes_schedule_core
.type _vpaes_schedule_core,@function
.align 16
_vpaes_schedule_core:
addl (%esp),%ebp
movdqu (%esi),%xmm0
movdqa 320(%ebp),%xmm2
movdqa %xmm0,%xmm3
leal (%ebp),%ebx
movdqa %xmm2,4(%esp)
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
testl %edi,%edi
jnz .L002schedule_am_decrypting
movdqu %xmm0,(%edx)
jmp .L003schedule_go
.L002schedule_am_decrypting:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
movdqu %xmm3,(%edx)
xorl $48,%ecx
.L003schedule_go:
cmpl $192,%eax
ja .L004schedule_256
.L005schedule_128:
movl $10,%eax
.L006loop_schedule_128:
call _vpaes_schedule_round
decl %eax
jz .L007schedule_mangle_last
call _vpaes_schedule_mangle
jmp .L006loop_schedule_128
.align 16
.L004schedule_256:
movdqu 16(%esi),%xmm0
call _vpaes_schedule_transform
movl $7,%eax
.L008loop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decl %eax
jz .L007schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0
movdqa %xmm7,20(%esp)
movdqa %xmm6,%xmm7
call .L_vpaes_schedule_low_round
movdqa 20(%esp),%xmm7
jmp .L008loop_schedule_256
.align 16
.L007schedule_mangle_last:
leal 384(%ebp),%ebx
testl %edi,%edi
jnz .L009schedule_mangle_last_dec
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,193
leal 352(%ebp),%ebx
addl $32,%edx
.L009schedule_mangle_last_dec:
addl $-16,%edx
pxor 336(%ebp),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
.hidden _vpaes_schedule_round
.type _vpaes_schedule_round,@function
.align 16
_vpaes_schedule_round:
movdqa 8(%esp),%xmm2
pxor %xmm1,%xmm1
.byte 102,15,58,15,202,15
.byte 102,15,58,15,210,15
pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0
.byte 102,15,58,15,192,1
movdqa %xmm2,8(%esp)
.L_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor 336(%ebp),%xmm7
movdqa -16(%ebp),%xmm4
movdqa -48(%ebp),%xmm5
movdqa %xmm4,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm4,%xmm0
movdqa -32(%ebp),%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm5,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm5,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm5,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa 32(%ebp),%xmm4
.byte 102,15,56,0,226
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
.hidden _vpaes_schedule_transform
.type _vpaes_schedule_transform,@function
.align 16
_vpaes_schedule_transform:
movdqa -16(%ebp),%xmm2
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
movdqa (%ebx),%xmm2
.byte 102,15,56,0,208
movdqa 16(%ebx),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
.hidden _vpaes_schedule_mangle
.type _vpaes_schedule_mangle,@function
.align 16
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa 128(%ebp),%xmm5
testl %edi,%edi
jnz .L010schedule_mangle_dec
addl $16,%edx
pxor 336(%ebp),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
jmp .L011schedule_mangle_both
.align 16
.L010schedule_mangle_dec:
movdqa -16(%ebp),%xmm2
leal (%ebp),%esi
movdqa %xmm2,%xmm1
pandn %xmm4,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm4
movdqa (%esi),%xmm2
.byte 102,15,56,0,212
movdqa 16(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 32(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 48(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 64(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 80(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 96(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 112(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
addl $-16,%edx
.L011schedule_mangle_both:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
addl $-16,%ecx
andl $48,%ecx
movdqu %xmm3,(%edx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,@function
.align 16
vpaes_set_encrypt_key:
.L_vpaes_set_encrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L012pic
.L012pic:
popl %ebx
leal BORINGSSL_function_hit+5-.L012pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
movl $48,%ecx
movl $0,%edi
leal .L_vpaes_consts+0x30-.L013pic_point,%ebp
call _vpaes_schedule_core
.L013pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,@function
.align 16
vpaes_encrypt:
.L_vpaes_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L014pic
.L014pic:
popl %ebx
leal BORINGSSL_function_hit+4-.L014pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
leal .L_vpaes_consts+0x30-.L015pic_point,%ebp
call _vpaes_preheat
.L015pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_encrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_encrypt,.-.L_vpaes_encrypt_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/aesv8-gcm-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include <ring-core/arm_arch.h>
#if __ARM_MAX_ARCH__ >= 8
.text
.globl _aes_gcm_enc_kernel
.private_extern _aes_gcm_enc_kernel
.align 4
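// In outline (a sketch, not a full description): x10 holds the low 64
// bits of each counter block, w11 the invariant upper 32 bits, and w12
// the 32-bit block counter in host order. Every CTR block is assembled as
//   rev  w9, w12                 // counter back to big-endian
//   orr  x9, x11, x9, lsl #32    // place it in the top 32 bits
//   fmov dN, x10                 // low half of the block
//   fmov vN.d[1], x9             // high half
// with four blocks kept in flight so the AES rounds overlap the GHASH
// arithmetic.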
_aes_gcm_enc_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
add x4, x0, x1, lsr #3 // end_input_ptr
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ld1 { v0.16b}, [x16] // special case: vector-load the initial counter so we can start the first AES block as quickly as possible
sub x5, x5, #1 // byte_len - 1
ldr q18, [x8, #0] // load rk0
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
ldr q25, [x8, #112] // load rk7
add x5, x5, x0
lsr x12, x11, #32
fmov d2, x10 // CTR block 2
orr w11, w11, w11
rev w12, w12 // rev_ctr32
fmov d1, x10 // CTR block 1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
add w12, w12, #1 // increment rev_ctr32
rev w9, w12 // CTR block 1
fmov d3, x10 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 1
add w12, w12, #1 // CTR block 1
ldr q19, [x8, #16] // load rk1
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
ldr q20, [x8, #32] // load rk2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
orr x9, x11, x9, lsl #32 // CTR block 3
fmov v3.d[1], x9 // CTR block 3
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q21, [x8, #48] // load rk3
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q24, [x8, #96] // load rk6
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q23, [x8, #80] // load rk5
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q22, [x8, #64] // load rk4
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
ldr q29, [x8, #176] // load rk11
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
ldr q26, [x8, #128] // load rk8
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
add w12, w12, #1 // CTR block 3
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
ldr q27, [x8, #144] // load rk9
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
ldr q28, [x8, #160] // load rk10
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
b.lt Lenc_finish_first_blocks // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
b.eq Lenc_finish_first_blocks // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Lenc_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v2.16b, v31.16b // AES block 2 - round N-1
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
aese v3.16b, v31.16b // AES block 3 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
b.ge Lenc_tail // handle tail
ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
rev w9, w12 // CTR block 4
ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
add x0, x0, #64 // AES input_ptr update
eor x19, x19, x13 // AES block 1 - round N low
eor x20, x20, x14 // AES block 1 - round N high
fmov d5, x19 // AES block 1 - mov low
eor x6, x6, x13 // AES block 0 - round N low
eor x7, x7, x14 // AES block 0 - round N high
eor x24, x24, x14 // AES block 3 - round N high
fmov d4, x6 // AES block 0 - mov low
cmp x0, x5 // check if we have <= 8 blocks
fmov v4.d[1], x7 // AES block 0 - mov high
eor x23, x23, x13 // AES block 3 - round N low
eor x21, x21, x13 // AES block 2 - round N low
fmov v5.d[1], x20 // AES block 1 - mov high
fmov d6, x21 // AES block 2 - mov low
add w12, w12, #1 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov d7, x23 // AES block 3 - mov low
eor x22, x22, x14 // AES block 2 - round N high
fmov v6.d[1], x22 // AES block 2 - mov high
eor v4.16b, v4.16b, v0.16b // AES block 0 - result
fmov d0, x10 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
eor v5.16b, v5.16b, v1.16b // AES block 1 - result
fmov d1, x10 // CTR block 5
orr x9, x11, x9, lsl #32 // CTR block 5
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
st1 { v4.16b}, [x2], #16 // AES block 0 - store result
fmov v7.d[1], x24 // AES block 3 - mov high
orr x9, x11, x9, lsl #32 // CTR block 6
eor v6.16b, v6.16b, v2.16b // AES block 2 - result
st1 { v5.16b}, [x2], #16 // AES block 1 - store result
add w12, w12, #1 // CTR block 6
fmov d2, x10 // CTR block 6
fmov v2.d[1], x9 // CTR block 6
st1 { v6.16b}, [x2], #16 // AES block 2 - store result
rev w9, w12 // CTR block 7
orr x9, x11, x9, lsl #32 // CTR block 7
eor v7.16b, v7.16b, v3.16b // AES block 3 - result
st1 { v7.16b}, [x2], #16 // AES block 3 - store result
b.ge Lenc_prepretail // do prepretail
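// Main loop: each iteration CTR-encrypts blocks 4k+4..4k+7 while
// GHASH-ing the four ciphertext blocks produced by the previous
// iteration (v4-v7). Products against h4..h1 (v15..v12) accumulate
// karatsuba-style into high/mid/low (v9/v10/v11); the MODULO section
// then folds high and mid back into low using the 0xc2 reduction
// constant. AES rounds are interleaved throughout to hide pmull and
// load latencies.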
Lenc_main_loop: // main loop start
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d3, x10 // CTR block 4k+3
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
fmov v3.d[1], x9 // CTR block 4k+3
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
eor v4.16b, v4.16b, v11.16b // PRE 1
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x23, x23, x13 // AES block 4k+7 - round N low
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d10, v17.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
eor x22, x22, x14 // AES block 4k+6 - round N high
mov d8, v4.d[1] // GHASH block 4k - mid
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor x19, x19, x13 // AES block 4k+5 - round N low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
eor x21, x21, x13 // AES block 4k+6 - round N low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
movi v8.8b, #0xc2
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
fmov d5, x19 // AES block 4k+5 - mov low
ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
b.lt Lenc_main_loop_continue // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Lenc_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Lenc_main_loop_continue:
shl d8, d8, #56 // mod_constant
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
add w12, w12, #1 // CTR block 4k+3
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
add x0, x0, #64 // AES input_ptr update
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
rev w9, w12 // CTR block 4k+8
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor x6, x6, x13 // AES block 4k+4 - round N low
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
eor x7, x7, x14 // AES block 4k+4 - round N high
fmov d4, x6 // AES block 4k+4 - mov low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
eor x20, x20, x14 // AES block 4k+5 - round N high
eor x24, x24, x14 // AES block 4k+7 - round N high
add w12, w12, #1 // CTR block 4k+8
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
fmov d7, x23 // AES block 4k+7 - mov low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
fmov v5.d[1], x20 // AES block 4k+5 - mov high
fmov d6, x21 // AES block 4k+6 - mov low
cmp x0, x5 // LOOP CONTROL
fmov v6.d[1], x22 // AES block 4k+6 - mov high
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
rev w9, w12 // CTR block 4k+9
add w12, w12, #1 // CTR block 4k+9
eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
fmov d1, x10 // CTR block 4k+9
orr x9, x11, x9, lsl #32 // CTR block 4k+9
fmov v1.d[1], x9 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
rev w9, w12 // CTR block 4k+10
st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
orr x9, x11, x9, lsl #32 // CTR block 4k+10
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
fmov v7.d[1], x24 // AES block 4k+7 - mov high
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
fmov d2, x10 // CTR block 4k+10
st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
fmov v2.d[1], x9 // CTR block 4k+10
rev w9, w12 // CTR block 4k+11
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
orr x9, x11, x9, lsl #32 // CTR block 4k+11
eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
b.lt Lenc_main_loop
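// Prepretail: encrypt the final four counter blocks while completing
// the GHASH (including the modular reduction) of the last four
// ciphertext blocks already stored, so Lenc_tail only has to combine
// results and hash at most four more blocks.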
Lenc_prepretail: // PREPRETAIL
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov d3, x10 // CTR block 4k+3
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
fmov v3.d[1], x9 // CTR block 4k+3
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
add w12, w12, #1 // CTR block 4k+3
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
pmull v4.1q, v9.1d, v8.1d
ext v9.16b, v9.16b, v9.16b, #8
eor v10.16b, v10.16b, v11.16b
b.lt Lenc_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
b.eq Lenc_finish_prepretail // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
Lenc_finish_prepretail:
eor v10.16b, v10.16b, v4.16b
eor v10.16b, v10.16b, v9.16b
pmull v4.1q, v10.1d, v8.1d
ext v10.16b, v10.16b, v10.16b, #8
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
eor v11.16b, v11.16b, v4.16b
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b
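// Tail: between one and four blocks (possibly with a partial final
// block) remain. The cmp/mov ladder below reshuffles the precomputed
// keystream blocks (v0-v3) and zeroes the GHASH accumulators so each
// "blocks left > n" case can fall through into the next.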
Lenc_tail: // TAIL
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
cmp x5, #48
fmov d4, x6 // AES block 4k+4 - mov low
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
b.gt Lenc_blocks_more_than_3
cmp x5, #32
mov v3.16b, v2.16b
movi v11.8b, #0
movi v9.8b, #0
sub w12, w12, #1
mov v2.16b, v1.16b
movi v10.8b, #0
b.gt Lenc_blocks_more_than_2
mov v3.16b, v1.16b
sub w12, w12, #1
cmp x5, #16
b.gt Lenc_blocks_more_than_1
sub w12, w12, #1
b Lenc_blocks_less_than_1
Lenc_blocks_more_than_3: // blocks left > 3
st1 { v5.16b}, [x2], #16 // AES final-3 block - store result
ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-3 block
eor x6, x6, x13 // AES final-2 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor x7, x7, x14 // AES final-2 block - round N high
mov d22, v4.d[1] // GHASH final-3 block - mid
fmov d5, x6 // AES final-2 block - mov low
fmov v5.d[1], x7 // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
mov d10, v17.d[1] // GHASH final-3 block - mid
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
Lenc_blocks_more_than_2: // blocks left > 2
st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-2 block
eor x6, x6, x13 // AES final-1 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
fmov d5, x6 // AES final-1 block - mov low
eor x7, x7, x14 // AES final-1 block - round N high
fmov v5.d[1], x7 // AES final-1 block - mov high
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
Lenc_blocks_more_than_1: // blocks left > 1
st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ldp x6, x7, [x0], #16 // AES final block - load input low & high
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
eor x6, x6, x13 // AES final block - round N low
mov d22, v4.d[1] // GHASH final-1 block - mid
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor x7, x7, x14 // AES final block - round N high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
fmov d5, x6 // AES final block - mov low
fmov v5.d[1], x7 // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
eor v5.16b, v5.16b, v3.16b // AES final block - result
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
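// Final, possibly partial block: build a byte mask from the remaining
// bit length, zero the unused tail of the keystream output, and merge
// the bytes already present at the destination (bif) so a short write
// still stores a full 16-byte vector.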
Lenc_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored
mvn x14, xzr // rkN_h = 0xffffffffffffffff
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x6, x13, x14, lt
csel x7, x14, xzr, lt
fmov d0, x6 // ctr0b is mask for last block
fmov v0.d[1], x7
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
mov d8, v4.d[1] // GHASH final block - mid
rev w9, w12
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
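// Final GHASH reduction. 0xc2 shifted into the top byte is the usual
// folding constant for the GF(2^128) polynomial x^128 + x^7 + x^2 + x + 1
// in this bit-reflected layout; two pmulls against it fold the 256-bit
// karatsuba result (high/mid/low in v9/v10/v11) back to 128 bits in v11.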
movi v8.8b, #0xc2
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
str w9, [x16, #12] // store the updated counter
st1 { v5.16b}, [x2] // store all 16B
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
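//
// aes_gcm_dec_kernel: AES-GCM decryption, structured like the encrypt
// kernel above. As used below: x0 = ciphertext in, x1 = length in bits,
// x2 = plaintext out, x3 = current GHASH tag Xi (16 bytes, read and
// written back), x4 = counter block (updated 32-bit counter stored back
// at offset 12), x5 = AES key schedule (round count at offset 240),
// x6 = table of GHASH key powers h1..h4 (at offsets 0, 32, 48, 80).
//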
.globl _aes_gcm_dec_kernel
.private_extern _aes_gcm_dec_kernel
.align 4
_aes_gcm_dec_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
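// Round-key setup: w17 = round count (offset 240 of the key schedule);
// the last round key is loaded into x13/x14 so the final AddRoundKey can
// be applied on general registers, and the round-(N-1) key into q31.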
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
orr w11, w11, w11
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
	ld1	{ v0.16b}, [x16]	//special case: vector-load the initial counter so the first AES block can start as early as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
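// Fold the GHASH key powers into karatsuba form: hN.hi and hN.lo are
// paired across registers (trn1/trn2), and hNk = hN.lo ^ hN.hi supplies
// the middle-term multiplicands in v16/v17.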
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge Ldec_prepretail // do prepretail
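// Main decrypt loop: mirrors Lenc_main_loop, but GHASH runs over the
// ciphertext just loaded (the AES input side), so decrypted blocks are
// moved to general registers, XORed with the last round key, and stored
// with stp while the vector registers keep the ciphertext for hashing.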
Ldec_main_loop: // main loop start
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt Ldec_main_loop
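// Prepretail (decrypt): finish hashing and storing the in-flight
// blocks and run the AES rounds for the final four counter blocks, so
// Ldec_tail starts with keystream ready in v0-v3.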
Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
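// Tail (decrypt): up to four ciphertext blocks remain; as in the
// encrypt tail, the keystream blocks are reshuffled and the GHASH
// accumulators zeroed so the "blocks left > n" cases fall through.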
Ldec_tail: // TAIL
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt Ldec_blocks_more_than_3
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt Ldec_blocks_more_than_2
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt Ldec_blocks_more_than_1
sub w12, w12, #1
b Ldec_blocks_less_than_1
Ldec_blocks_more_than_3: // blocks left > 3
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
Ldec_blocks_more_than_2: // blocks left > 2
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
Ldec_blocks_more_than_1: // blocks left > 1
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
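// Final, possibly partial block: derive 64-bit masks from the residual
// bit length, mask the decrypted words in x6/x7, and merge them with
// the untouched bytes loaded from the destination (bic/orr) before the
// masked ciphertext is fed into the last GHASH block.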
Ldec_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x14, xzr // rkN_h = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
	ldp	x4, x5, [x2]	//load the bytes already at the output that must not be overwritten
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x9, x13, x14, lt
csel x10, x14, xzr, lt
fmov d0, x9 // ctr0b is mask for last block
and x6, x6, x9
mov v0.d[1], x10
bic x4, x4, x9 // mask out low existing bytes
rev w9, w12
bic x5, x5, x10 // mask out high existing bytes
orr x6, x6, x4
and x7, x7, x10
orr x7, x7, x5
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
chairq/First-choice | 49,096 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/sha512-armv8-ios64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
#endif
.text
.private_extern _OPENSSL_armcap_P
.globl _sha512_block_data_order
.private_extern _sha512_block_data_order
.align 6
_sha512_block_data_order:
AARCH64_VALID_CALL_TARGET
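// Dispatch: if OPENSSL_armcap_P advertises the SHA-512 instructions
// (ARMV8_SHA512), take the hardware path at Lv8_entry; otherwise fall
// through to the scalar implementation below.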
#ifndef __KERNEL__
#if defined(OPENSSL_HWASAN) && __clang_major__ >= 10
adrp x16,:pg_hi21_nc:_OPENSSL_armcap_P
#else
adrp x16,_OPENSSL_armcap_P@PAGE
#endif
ldr w16,[x16,_OPENSSL_armcap_P@PAGEOFF]
tst w16,#ARMV8_SHA512
b.ne Lv8_entry
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,LK512@PAGE
add x30,x30,LK512@PAGEOFF
stp x0,x2,[x29,#96]
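// Scalar round structure (per the existing per-line comments): Sigma1(e)
// and Sigma0(a) are each built from one ror plus two eor-with-rotate
// instructions; Ch(e,f,g) from and/bic/orr; and Maj(a,b,c) is computed
// incrementally as ((a^b) & (b^c)) ^ b, with a^b carried into the next
// round in x19/x28. The h += Sigma0(a) add from round i is deferred into
// round i+1 to shorten the critical path.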
Loop:
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
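// Rounds 16-79: the schedule is extended on the fly as
// X[i] += sigma0(X[i+1]) + X[i+9] + sigma1(X[i+14]), interleaved with
// the round function; loading the zero terminator after K[79] makes
// the cbnz below fall through and end the block.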
Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section __TEXT,__const
.align 6
LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.align 6
sha512_block_armv8:
Lv8_entry:
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,LK512@PAGE
add x3,x3,LK512@PAGEOFF
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b Loop_hw
.align 4
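// One 128-byte block per iteration using the SHA512 extension: v0-v3
// hold the state, v16-v23 the message schedule, and v24/v25 stream the
// round constants from LK512. The sha512h/sha512h2/sha512su0/sha512su1
// instructions are emitted as raw .long encodings, so the file
// assembles even with assemblers that lack the SHA512 mnemonics.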
Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
chairq/First-choice | 8,277 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/pregenerated/aesv8-armx-win64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <ring-core/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.section .rodata
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl aes_hw_set_encrypt_key
.def aes_hw_set_encrypt_key
.type 32
.endef
.align 5
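// Entry: x0 -> user key, w1 = key length in bits, x2 -> key schedule.
// Returns 0 on success, -1 for NULL arguments and -2 for an
// unsupported length; only 128- and 256-bit keys are handled here
// (192-bit support was removed).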
aes_hw_set_encrypt_key:
Lenc_key:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-1
cmp x0,#0
b.eq Lenc_key_abort
cmp x2,#0
b.eq Lenc_key_abort
mov x3,#-2
cmp w1,#128
b.lt Lenc_key_abort
cmp w1,#256
b.gt Lenc_key_abort
tst w1,#0x3f
b.ne Lenc_key_abort
adrp x3,Lrcon
add x3,x3,:lo12:Lrcon
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt Loop128
// 192-bit key support was removed.
b L256
.align 4
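// Key expansion trick: tbl rotates the last word of the previous round
// key and splats it across all lanes (RotWord); aese against the
// all-zero v0 then degenerates to plain SubBytes, since ShiftRows is a
// no-op on splatted data. v1 holds the round constant, doubled by the
// shl below, with 0x1b reloaded from Lrcon for the final iterations.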
Loop128:
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b Ldone
// 192-bit key support was removed.
.align 4
L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b Loop256
Ldone:
str w12,[x2]
mov x3,#0
Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.globl aes_hw_encrypt
.def aes_hw_encrypt
.type 32
.endef
.align 5
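// Entry: x0 -> input block, x1 -> output block, x2 -> key schedule;
// the round count is read from offset 240 of the schedule.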
aes_hw_encrypt:
AARCH64_VALID_CALL_TARGET
ldr w3,[x2,#240]
ld1 {v0.4s},[x2],#16
ld1 {v2.16b},[x0]
sub w3,w3,#2
ld1 {v1.4s},[x2],#16
Loop_enc:
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2],#16
subs w3,w3,#2
aese v2.16b,v1.16b
aesmc v2.16b,v2.16b
ld1 {v1.4s},[x2],#16
b.gt Loop_enc
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2]
aese v2.16b,v1.16b
eor v2.16b,v2.16b,v0.16b
st1 {v2.16b},[x1]
ret
.globl aes_hw_ctr32_encrypt_blocks
.def aes_hw_ctr32_encrypt_blocks
.type 32
.endef
.align 5
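// Entry: x0 -> input, x1 -> output, x2 = block count, x3 -> key
// schedule, x4 -> IV with a big-endian 32-bit counter in its last
// word.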
aes_hw_ctr32_encrypt_blocks:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240]
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The vmov lines
// could write to v1.16b and v18.16b directly, but that trips these bugs.
// We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b Loop3x_ctr32
.align 4
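// Main path: three counter blocks (v0, v1, v18) are encrypted in
// parallel to hide aese/aesmc latency, each seeded from v6 as
// explained in the erratum note above.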
Loop3x_ctr32:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs Loop3x_ctr32
adds x2,x2,#3
b.eq Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq
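// Tail: one or two blocks remain. x12 is 16 when a second input block
// exists and 0 otherwise, so the second load harmlessly re-reads the
// same block and its result is simply not stored.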
Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq Lctr32_done
st1 {v3.16b},[x1]
Lctr32_done:
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
chairq/First-choice | 2,659 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/third_party/fiat/asm/fiat_curve25519_adx_square.S |
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
(defined(__APPLE__) || defined(__ELF__))
.intel_syntax noprefix
.text
#if defined(__APPLE__)
.private_extern _fiat_curve25519_adx_square
.global _fiat_curve25519_adx_square
_fiat_curve25519_adx_square:
#else
.type fiat_curve25519_adx_square, @function
.hidden fiat_curve25519_adx_square
.global fiat_curve25519_adx_square
fiat_curve25519_adx_square:
#endif
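# Squares a 4x64-bit field element mod 2^255-19 (rsi -> input,
# rdi -> output) using BMI2/ADX: mulx feeds two independent carry
# chains advanced by adcx and adox, with xor/test/clc clearing the
# flags between chains. The constant 0x26 (= 38 = 2*19) folds the
# upper limbs back into the lower limbs for the final reduction.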
.cfi_startproc
_CET_ENDBR
push rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp, -16
mov rbp, rsp
mov rdx, [ rsi + 0x0 ]
mulx r10, rax, [ rsi + 0x8 ]
mov rdx, [ rsi + 0x0 ]
mulx rcx, r11, [ rsi + 0x10 ]
xor rdx, rdx
adox r11, r10
mov rdx, [ rsi + 0x0 ]
mulx r9, r8, [ rsi + 0x18 ]
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x80 ], rbx
.cfi_offset rbx, -16-0x80
mulx rbx, r10, [ rsi + 0x18 ]
adox r8, rcx
mov [rsp - 0x48 ], rdi
adox r10, r9
adcx rax, rax
mov rdx, [ rsi + 0x10 ]
mulx r9, rcx, [ rsi + 0x18 ]
adox rcx, rbx
mov rdx, [ rsi + 0x10 ]
mulx rdi, rbx, [ rsi + 0x8 ]
mov rdx, 0x0
adox r9, rdx
mov [ rsp - 0x70 ], r12
.cfi_offset r12, -16-0x70
mov r12, -0x3
inc r12
adox rbx, r8
adox rdi, r10
adcx r11, r11
mov r8, rdx
adox r8, rcx
mov r10, rdx
adox r10, r9
adcx rbx, rbx
mov rdx, [ rsi + 0x0 ]
mulx r9, rcx, rdx
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x68 ], r13
.cfi_offset r13, -16-0x68
mov [ rsp - 0x60 ], r14
.cfi_offset r14, -16-0x60
mulx r14, r13, rdx
seto dl
inc r12
adox r9, rax
adox r13, r11
adox r14, rbx
adcx rdi, rdi
mov al, dl
mov rdx, [ rsi + 0x10 ]
mulx rbx, r11, rdx
adox r11, rdi
adcx r8, r8
adox rbx, r8
adcx r10, r10
movzx rdx, al
mov rdi, 0x0
adcx rdx, rdi
movzx r8, al
lea r8, [ r8 + rdx ]
mov rdx, [ rsi + 0x18 ]
mulx rdi, rax, rdx
adox rax, r10
mov rdx, 0x26
mov [ rsp - 0x58 ], r15
.cfi_offset r15, -16-0x58
mulx r15, r10, r11
clc
adcx r10, rcx
mulx r11, rcx, rbx
adox r8, rdi
mulx rdi, rbx, r8
inc r12
adox rcx, r9
mulx r8, r9, rax
adcx r15, rcx
adox r9, r13
adcx r11, r9
adox rbx, r14
adox rdi, r12
adcx r8, rbx
adc rdi, 0x0
mulx r14, r13, rdi
test al, al
mov rdi, [ rsp - 0x48 ]
adox r13, r10
mov r14, r12
adox r14, r15
mov [ rdi + 0x8 ], r14
mov rax, r12
adox rax, r11
mov r10, r12
adox r10, r8
mov [ rdi + 0x10 ], rax
mov rcx, r12
cmovo rcx, rdx
adcx r13, rcx
mov [ rdi + 0x0 ], r13
mov [ rdi + 0x18 ], r10
mov rbx, [ rsp - 0x80 ]
.cfi_restore rbx
mov r12, [ rsp - 0x70 ]
.cfi_restore r12
mov r13, [ rsp - 0x68 ]
.cfi_restore r13
mov r14, [ rsp - 0x60 ]
.cfi_restore r14
mov r15, [ rsp - 0x58 ]
.cfi_restore r15
pop rbp
.cfi_restore rbp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
#if defined(__ELF__)
.size fiat_curve25519_adx_square, .-fiat_curve25519_adx_square
#endif
#endif
chairq/First-choice | 3,464 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/third_party/fiat/asm/fiat_curve25519_adx_mul.S |
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
(defined(__APPLE__) || defined(__ELF__))
.intel_syntax noprefix
.text
#if defined(__APPLE__)
.private_extern _fiat_curve25519_adx_mul
.global _fiat_curve25519_adx_mul
_fiat_curve25519_adx_mul:
#else
.type fiat_curve25519_adx_mul, @function
.hidden fiat_curve25519_adx_mul
.global fiat_curve25519_adx_mul
fiat_curve25519_adx_mul:
#endif
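# Schoolbook 4x4-limb multiplication mod 2^255-19 (rdi -> output,
# rsi and rdx -> inputs; rdx is saved in rax first since mulx consumes
# rdx implicitly). As in the square routine above, two adcx/adox carry
# chains run in parallel and 0x26 (= 38) performs the final reduction.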
.cfi_startproc
_CET_ENDBR
push rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp, -16
mov rbp, rsp
mov rax, rdx
mov rdx, [ rsi + 0x18 ]
mulx r11, r10, [ rax + 0x8 ]
mov rdx, [ rax + 0x0 ]
mov [ rsp - 0x58 ], r15
.cfi_offset r15, -16-0x58
mulx r8, rcx, [ rsi + 0x18 ]
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x80 ], rbx
.cfi_offset rbx, -16-0x80
mulx rbx, r9, [ rax + 0x18 ]
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x70 ], r12
.cfi_offset r12, -16-0x70
mulx r15, r12, [ rax + 0x8 ]
mov rdx, [ rsi + 0x0 ]
mov [ rsp - 0x68 ], r13
.cfi_offset r13, -16-0x68
mov [ rsp - 0x60 ], r14
.cfi_offset r14, -16-0x60
mulx r14, r13, [ rax + 0x0 ]
mov rdx, [ rax + 0x10 ]
mov [ rsp - 0x18 ], r15
mov [ rsp - 0x50 ], rdi
mulx rdi, r15, [ rsi + 0x0 ]
mov rdx, [ rax + 0x18 ]
mov [ rsp - 0x48 ], r13
mov [ rsp - 0x40 ], r9
mulx r9, r13, [ rsi + 0x0 ]
test al, al
adox rcx, rdi
mov rdx, [ rsi + 0x10 ]
mov [ rsp - 0x38 ], r13
mulx r13, rdi, [ rax + 0x8 ]
adox r10, r9
mov rdx, 0x0
adox rbx, rdx
adcx rdi, rcx
adcx r8, r10
mov r9, rdx
adcx r9, rbx
mov rdx, [ rsi + 0x10 ]
mulx r10, rcx, [ rax + 0x0 ]
mov rdx, [ rsi + 0x0 ]
mov [ rsp - 0x30 ], r15
mulx r15, rbx, [ rax + 0x8 ]
mov rdx, -0x2
inc rdx
adox rcx, r15
setc r15b
clc
adcx rcx, r12
adox r10, rdi
mov rdx, [ rax + 0x10 ]
mov [ rsp - 0x78 ], rcx
mulx rcx, rdi, [ rsi + 0x10 ]
adox rdi, r8
mov rdx, [ rax + 0x18 ]
mov [ rsp - 0x28 ], rcx
mulx rcx, r8, [ rsi + 0x10 ]
mov rdx, [ rax + 0x10 ]
mov [ rsp - 0x20 ], r8
mulx r12, r8, [ rsi + 0x18 ]
adox r8, r9
mov rdx, [ rsi + 0x8 ]
mov [ rsp - 0x10 ], r12
mulx r12, r9, [ rax + 0x10 ]
movzx rdx, r15b
lea rdx, [ rdx + rcx ]
adcx r9, r10
adcx r13, rdi
mov r15, 0x0
mov r10, r15
adox r10, rdx
mov rdx, [ rax + 0x18 ]
mulx rcx, rdi, [ rsi + 0x18 ]
adox rcx, r15
adcx r11, r8
mov rdx, r15
adcx rdx, r10
adcx rcx, r15
mov r8, rdx
mov rdx, [ rax + 0x0 ]
mulx r15, r10, [ rsi + 0x8 ]
test al, al
adox r10, r14
adcx rbx, r10
adox r15, [ rsp - 0x78 ]
adcx r15, [ rsp - 0x30 ]
adox r9, [ rsp - 0x18 ]
adcx r9, [ rsp - 0x38 ]
adox r13, [ rsp - 0x40 ]
adcx r12, r13
adox r11, [ rsp - 0x20 ]
adcx r11, [ rsp - 0x28 ]
mov rdx, 0x26
mulx rsi, r14, r12
adox rdi, r8
adcx rdi, [ rsp - 0x10 ]
mulx r10, r8, r11
mov r13, 0x0
adox rcx, r13
adcx rcx, r13
mulx r11, r12, rdi
xor rdi, rdi
adox r8, rbx
adox r12, r15
mulx rbx, r13, rcx
adcx r14, [ rsp - 0x48 ]
adox r13, r9
adox rbx, rdi
adcx rsi, r8
adcx r10, r12
adcx r11, r13
adc rbx, 0x0
mulx r9, r15, rbx
xor r9, r9
adox r15, r14
mov rdi, r9
adox rdi, rsi
mov rcx, r9
adox rcx, r10
mov r8, [ rsp - 0x50 ]
mov [ r8 + 0x8 ], rdi
mov r12, r9
adox r12, r11
mov r14, r9
cmovo r14, rdx
mov [ r8 + 0x18 ], r12
adcx r15, r14
mov [ r8 + 0x0 ], r15
mov [ r8 + 0x10 ], rcx
mov rbx, [ rsp - 0x80 ]
.cfi_restore rbx
mov r12, [ rsp - 0x70 ]
.cfi_restore r12
mov r13, [ rsp - 0x68 ]
.cfi_restore r13
mov r14, [ rsp - 0x60 ]
.cfi_restore r14
mov r15, [ rsp - 0x58 ]
.cfi_restore r15
pop rbp
.cfi_restore rbp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
#if defined(__ELF__)
.size fiat_curve25519_adx_mul, .-fiat_curve25519_adx_mul
#endif
#endif
chairq/First-choice | 62,534 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/crypto/poly1305/poly1305_arm_asm.S |
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
#pragma GCC diagnostic ignored "-Wlanguage-extension-token"
# This implementation was taken from the public-domain neon2 version in
# SUPERCOP, by D. J. Bernstein and Peter Schwabe.
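# Two functions live in this file: openssl_poly1305_neon2_blocks, which
# absorbs the message in batches of two or four 16-byte blocks, and
# openssl_poly1305_neon2_addmulmod, which performs a single
# (x + c) * y step.  All arithmetic is over five 26-bit limbs modulo
# 2^130 - 5, vectorized two lanes at a time with NEON.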
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y0
# qhasm: reg128 y12
# qhasm: reg128 y34
# qhasm: reg128 5y12
# qhasm: reg128 5y34
# qhasm: stack128 y0_stack
# qhasm: stack128 y12_stack
# qhasm: stack128 y34_stack
# qhasm: stack128 5y12_stack
# qhasm: stack128 5y34_stack
# qhasm: reg128 z0
# qhasm: reg128 z12
# qhasm: reg128 z34
# qhasm: reg128 5z12
# qhasm: reg128 5z34
# qhasm: stack128 z0_stack
# qhasm: stack128 z12_stack
# qhasm: stack128 z34_stack
# qhasm: stack128 5z12_stack
# qhasm: stack128 5z34_stack
# qhasm: stack128 two24
# qhasm: int32 ptr
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 d01
# qhasm: reg128 d23
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: reg128 u0
# qhasm: reg128 u1
# qhasm: reg128 u2
# qhasm: reg128 u3
# qhasm: reg128 u4
# qhasm: reg128 v01
# qhasm: reg128 mid
# qhasm: reg128 v23
# qhasm: reg128 v4
# qhasm: int32 len
# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.align 4
.global openssl_poly1305_neon2_blocks
.hidden openssl_poly1305_neon2_blocks
.type openssl_poly1305_neon2_blocks STT_FUNC
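# Arguments (the qhasm comments call them input_0..input_3): r0 points at
# the interleaved accumulator limbs, r1 at the precomputed multiplier
# limbs (the y*, then z* values), r2 at the message, and r3 holds the
# byte count.  The number of unprocessed trailing bytes is returned in r0.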
openssl_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
bic sp,sp,#31
# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3
# qhasm: new y0
# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!
# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!
# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8
# qhasm: new z0
# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!
# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!
# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff
# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff
# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!
# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!
# qhasm: x4 aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]
# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6
# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7
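# Since 2^130 = 5 (mod 2^130 - 5), limb products that cross the 2^130
# boundary re-enter multiplied by 5; precompute 5*y and 5*z cheaply as
# (y << 2) + y.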
# qhasm: 4x 5y12 = y12 << 2
# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
vshl.i32 q11,q1,#2
# qhasm: 4x 5y34 = y34 << 2
# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
vshl.i32 q12,q2,#2
# qhasm: 4x 5y12 += y12
# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
vadd.i32 q11,q11,q1
# qhasm: 4x 5y34 += y34
# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
vadd.i32 q12,q12,q2
# qhasm: 2x u4 <<= 24
# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
# asm 2: vshl.i64 >u4=q7,<u4=q7,#24
vshl.i64 q7,q7,#24
# qhasm: 4x 5z12 = z12 << 2
# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
vshl.i32 q13,q4,#2
# qhasm: 4x 5z34 = z34 << 2
# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
vshl.i32 q14,q5,#2
# qhasm: 4x 5z12 += z12
# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
vadd.i32 q13,q13,q4
# qhasm: 4x 5z34 += z34
# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
vadd.i32 q14,q14,q5
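# Park the multiplier limbs, their 5x variants, and the 2^24 constant
# built in u4 in the 32-byte-aligned frame so the loops below can refetch
# them with aligned 128-bit loads.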
# qhasm: new two24
# qhasm: new y0_stack
# qhasm: new y12_stack
# qhasm: new y34_stack
# qhasm: new 5y12_stack
# qhasm: new 5y34_stack
# qhasm: new z0_stack
# qhasm: new z12_stack
# qhasm: new z34_stack
# qhasm: new 5z12_stack
# qhasm: new 5z34_stack
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: mem128[ptr] aligned= u4
# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
vst1.8 {d14-d15},[r1,: 128]
# qhasm: r4 = u4
# asm 1: vmov >r4=reg128#16,<u4=reg128#8
# asm 2: vmov >r4=q15,<u4=q7
vmov q15,q7
# qhasm: r0 = u4
# asm 1: vmov >r0=reg128#8,<u4=reg128#8
# asm 2: vmov >r0=q7,<u4=q7
vmov q7,q7
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
# asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
add r1,sp,#16
# qhasm: mem128[ptr] aligned= y0
# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
vst1.8 {d0-d1},[r1,: 128]
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
# asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
add r1,sp,#32
# qhasm: mem128[ptr] aligned= y12
# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
vst1.8 {d2-d3},[r1,: 128]
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
# asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
add r1,sp,#48
# qhasm: mem128[ptr] aligned= y34
# asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
vst1.8 {d4-d5},[r1,: 128]
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
# asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
add r1,sp,#96
# qhasm: mem128[ptr] aligned= z0
# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
vst1.8 {d6-d7},[r1,: 128]
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
# asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
add r1,sp,#112
# qhasm: mem128[ptr] aligned= z12
# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
vst1.8 {d8-d9},[r1,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
# asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
add r1,sp,#128
# qhasm: mem128[ptr] aligned= z34
# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
vst1.8 {d10-d11},[r1,: 128]
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
add r1,sp,#64
# qhasm: mem128[ptr] aligned= 5y12
# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
vst1.8 {d22-d23},[r1,: 128]
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
add r1,sp,#80
# qhasm: mem128[ptr] aligned= 5y34
# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
vst1.8 {d24-d25},[r1,: 128]
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
add r1,sp,#144
# qhasm: mem128[ptr] aligned= 5z12
# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
vst1.8 {d26-d27},[r1,: 128]
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
add r1,sp,#160
# qhasm: mem128[ptr] aligned= 5z34
# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
vst1.8 {d28-d29},[r1,: 128]
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto below64bytes if !unsigned>
bls ._below64bytes
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#3,#32
# asm 2: add >input_2=r1,<input_2=r2,#32
add r1,r2,#32
# qhasm: mainloop2:
._mainloop2:
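# Each pass absorbs 64 message bytes: the running x limbs are multiplied
# by the z table while the freshly loaded block pair (v01/v23/v4, with the
# 2^128 padding bit OR'd into v4) is multiplied by the y table, then one
# carry pass brings every lane back toward 26 bits.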
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
vld1.8 {d0-d1},[r1]!
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
vmlal.u32 q15,d16,d11
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
# asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
add r2,sp,#112
# qhasm: z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
vmlal.u32 q15,d17,d10
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
# asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
add r2,sp,#96
# qhasm: z0 aligned= mem128[ptr]
# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]
# qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
vmlal.u32 q15,d18,d5
# qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
# asm 2: vtrn.32 <c01=d1,<c23=d3
vtrn.32 d1,d3
# qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
vmlal.u32 q15,d19,d4
# qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
vmlal.u32 q15,d20,d6
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
# asm 2: vshll.u32 >r3=q4,<c23=d3,#18
vshll.u32 q4,d3,#18
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
# asm 2: vtrn.32 <c01=d0,<c23=d2
vtrn.32 d0,d2
# qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
vmlal.u32 q4,d16,d10
# qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
vmlal.u32 q4,d17,d5
# qhasm: r0 = r0[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
vext.32 d14,d14,d0,#1
# qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
vmlal.u32 q4,d18,d4
# qhasm: input_2 -= 64
# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
# asm 2: sub >input_2=r1,<input_2=r1,#64
sub r1,r1,#64
# qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
vmlal.u32 q4,d19,d6
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
add r2,sp,#160
# qhasm: 5z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
vmlal.u32 q4,d20,d11
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
# asm 2: vrev64.i32 >r0=q7,<r0=q7
vrev64.i32 q7,q7
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
# asm 2: vshll.u32 >r2=q13,<c01=d1,#12
vshll.u32 q13,d1,#12
# qhasm: d01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
vld1.8 {d22-d23},[r1]!
# qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
vmlal.u32 q13,d16,d5
# qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
vmlal.u32 q13,d17,d4
# qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
vmlal.u32 q13,d18,d6
# qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
vmlal.u32 q13,d19,d11
# qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
vmlal.u32 q13,d20,d10
# qhasm: r0 = r0[0,1]c01[1]r0[2]
# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
vext.32 d15,d0,d15,#1
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
# asm 2: vshll.u32 >r1=q14,<c23=d2,#6
vshll.u32 q14,d2,#6
# qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
vmlal.u32 q14,d16,d4
# qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
vmlal.u32 q14,d17,d6
# qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
vmlal.u32 q14,d18,d11
# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
vmlal.u32 q14,d19,d10
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
add r2,sp,#144
# qhasm: 5z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
vmlal.u32 q14,d20,d1
# qhasm: d23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
vmlal.u32 q7,d18,d10
# qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1]
# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
# asm 2: vswp <d23=d2,<d01=d23
vswp d2,d23
# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
vmlal.u32 q7,d19,d1
# qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
vmlal.u32 q7,d16,d6
# qhasm: new mid
# qhasm: 2x v4 = d23 unsigned>> 40
# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
# asm 2: vshr.u64 >v4=q3,<d23=q1,#40
vshr.u64 q3,q1,#40
# qhasm: mid = d01[1]d23[0] mid[2,3]
# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
vext.32 d0,d22,d2,#1
# qhasm: new v23
# qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
vshrn.u64 d19,q1,#14
# qhasm: mid = mid[0,1] d01[3]d23[2]
# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
vext.32 d1,d23,d3,#1
# qhasm: new v01
# qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
vshrn.u64 d21,q11,#26
# qhasm: v01 = d01[1]d01[0] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
vext.32 d20,d22,d22,#1
# qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
vmlal.u32 q7,d17,d11
# qhasm: v01 = v01[1]d01[2] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
vext.32 d20,d20,d23,#1
# qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
vshrn.u64 d18,q0,#20
# qhasm: v4 = v4[0]v4[2]v4[1]v4[3]
# asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
# asm 2: vtrn.32 <v4=d6,<v4=d7
vtrn.32 d6,d7
# qhasm: 4x v01 &= 0x03ffffff
# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
# asm 2: vand.i32 <v01=q10,#0x03ffffff
vand.i32 q10,#0x03ffffff
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
# asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
add r2,sp,#48
# qhasm: y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: 4x v23 &= 0x03ffffff
# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
# asm 2: vand.i32 <v23=q9,#0x03ffffff
vand.i32 q9,#0x03ffffff
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
# asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
add r2,sp,#32
# qhasm: y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
vld1.8 {d2-d3},[r2,: 128]
# qhasm: 4x v4 |= 0x01000000
# asm 1: vorr.i32 <v4=reg128#4,#0x01000000
# asm 2: vorr.i32 <v4=q3,#0x01000000
vorr.i32 q3,#0x01000000
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
# asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
add r2,sp,#16
# qhasm: y0 aligned= mem128[ptr]
# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
vmlal.u32 q15,d20,d5
# qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
vmlal.u32 q15,d21,d4
# qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
vmlal.u32 q15,d18,d3
# qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
vmlal.u32 q15,d19,d2
# qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
vmlal.u32 q15,d6,d0
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
add r2,sp,#80
# qhasm: 5y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
vld1.8 {d24-d25},[r2,: 128]
# qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
vmlal.u32 q4,d20,d4
# qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
vmlal.u32 q4,d21,d3
# qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
vmlal.u32 q4,d18,d2
# qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
vmlal.u32 q4,d19,d0
# qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
vmlal.u32 q4,d6,d25
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
add r2,sp,#64
# qhasm: 5y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]
# qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
vmlal.u32 q7,d6,d22
# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
vmlal.u32 q7,d18,d24
# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
vmlal.u32 q7,d19,d23
# qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
vmlal.u32 q7,d21,d25
# qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
vmlal.u32 q14,d20,d2
# qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
vmlal.u32 q14,d21,d0
# qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
vmlal.u32 q14,d18,d25
# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
vmlal.u32 q14,d19,d24
# qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
vmlal.u32 q14,d6,d23
# qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
vmlal.u32 q13,d20,d3
# qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
vmlal.u32 q13,d21,d2
# qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
vmlal.u32 q13,d18,d0
# qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
vmlal.u32 q13,d19,d25
# qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
vmlal.u32 q13,d6,d24
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#3,<two24=stack128#1
# asm 2: lea >ptr=r2,<two24=[sp,#0]
add r2,sp,#0
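# Carry chain: mask every lane back to 26 bits; the carry out of limb 4
# re-enters limb 0 multiplied by 5, computed as t0 + (t0 << 2).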
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
# asm 2: vshr.u64 >t1=q3,<r0=q7,#26
vshr.u64 q3,q7,#26
# qhasm: len -= 64
# asm 1: sub >len=int32#4,<len=int32#4,#64
# asm 2: sub >len=r3,<len=r3,#64
sub r3,r3,#64
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
# asm 2: vand >r0=q5,<r0=q7,<mask=q6
vand q5,q7,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
vadd.i64 q3,q14,q3
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q7,<r3=q4,#26
vshr.u64 q7,q4,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
vadd.i64 q7,q15,q7
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
# asm 2: vshr.u64 >t2=q8,<r1=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
# asm 2: vand >r1=q3,<r1=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x t0 = x4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
# asm 2: vshr.u64 >t0=q9,<x4=q7,#26
vshr.u64 q9,q7,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
vadd.i64 q8,q13,q8
# qhasm: x4 &= mask
# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
# asm 2: vand >x4=q10,<x4=q7,<mask=q6
vand q10,q7,q6
# qhasm: 2x x01 = r0 + t0
# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
vadd.i64 q5,q5,q9
# qhasm: r0 aligned= mem128[ptr]
# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
vld1.8 {d14-d15},[r2,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
# asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
add r2,sp,#128
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
# asm 2: vshl.i64 >t0=q9,<t0=q9,#2
vshl.i64 q9,q9,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
# asm 2: vshr.u64 >t3=q13,<r2=q8,#26
vshr.u64 q13,q8,#26
# qhasm: 2x x01 += t0
# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
vadd.i64 q14,q5,q9
# qhasm: z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q8,<mask=q6
vand q9,q8,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
vadd.i64 q4,q4,q13
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: 2x t1 = x01 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
# asm 2: vshr.u64 >t1=q13,<x01=q14,#26
vshr.u64 q13,q14,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = x01 & mask
# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
# asm 2: vand >x01=q8,<x01=q14,<mask=q6
vand q8,q14,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
vadd.i64 q3,q3,q13
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q13,<r3=q4,#26
vshr.u64 q13,q4,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: 2x x4 += t4
# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
vadd.i64 q10,q10,q13
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
# asm 2: vtrn.32 <r3=d8,<r3=d9
vtrn.32 d8,d9
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
vext.32 d19,d8,d8,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto mainloop2 if unsigned>
bhi ._mainloop2
# qhasm: input_2 -= 32
# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
# asm 2: sub >input_2=r2,<input_2=r1,#32
sub r2,r1,#32
# qhasm: below64bytes:
._below64bytes:
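# Tail loop: the same multiply/carry step for the remaining two-block
# (32-byte) batches, using the y table only.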
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto end if !unsigned>
bls ._end
# qhasm: mainloop:
._mainloop:
# qhasm: new r0
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
vld1.8 {d8-d9},[r1,: 128]
# qhasm: u4 aligned= mem128[ptr]
# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
vld1.8 {d10-d11},[r1,: 128]
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
vld1.8 {d14-d15},[r2]!
# qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
vmlal.u32 q4,d16,d5
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
vld1.8 {d26-d27},[r2]!
# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
vmlal.u32 q4,d17,d4
# qhasm: r0 = u4[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
vext.32 d6,d10,d14,#1
# qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
vmlal.u32 q4,d18,d3
# qhasm: r0 = r0[0,1]u4[1]c23[0]
# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
vext.32 d7,d10,d26,#1
# qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
vmlal.u32 q4,d19,d2
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
# asm 2: vrev64.i32 >r0=q3,<r0=q3
vrev64.i32 q3,q3
# qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
vmlal.u32 q4,d20,d0
# qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
vmlal.u32 q3,d20,d22
# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
vmlal.u32 q3,d18,d24
# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
vmlal.u32 q3,d19,d23
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
# asm 2: vtrn.32 <c01=q7,<c23=q13
vtrn.32 q7,q13
# qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
vmlal.u32 q3,d16,d0
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
# asm 2: vshll.u32 >r3=q5,<c23=d27,#18
vshll.u32 q5,d27,#18
# qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
vmlal.u32 q3,d17,d25
# qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
vmlal.u32 q5,d16,d4
# qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
vmlal.u32 q5,d17,d3
# qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
vmlal.u32 q5,d18,d2
# qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
vmlal.u32 q5,d19,d0
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
# asm 2: vshll.u32 >r1=q13,<c23=d26,#6
vshll.u32 q13,d26,#6
# qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
vmlal.u32 q5,d20,d25
# qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
vmlal.u32 q13,d16,d2
# qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
vmlal.u32 q13,d17,d0
# qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
vmlal.u32 q13,d18,d25
# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
vmlal.u32 q13,d19,d24
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
# asm 2: vshll.u32 >r2=q7,<c01=d15,#12
vshll.u32 q7,d15,#12
# qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
vmlal.u32 q13,d20,d23
# qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
vmlal.u32 q7,d16,d3
# qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
vmlal.u32 q7,d17,d2
# qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
vmlal.u32 q7,d18,d0
# qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
vmlal.u32 q7,d19,d25
# qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
vmlal.u32 q7,d20,d24
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q8,<r0=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >r0=q3,<r0=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
vadd.i64 q8,q13,q8
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q9,<r3=q5,#26
vshr.u64 q9,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
vadd.i64 q4,q4,q9
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
# asm 2: vshr.u64 >t2=q9,<r1=q8,#26
vshr.u64 q9,q8,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
# asm 2: vand >r1=q10,<r1=q8,<mask=q6
vand q10,q8,q6
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
# asm 2: vshr.u64 >t0=q8,<r4=q4,#26
vshr.u64 q8,q4,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
vadd.i64 q7,q7,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
# asm 2: vand >r4=q4,<r4=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
# asm 2: vshl.i64 >t0=q8,<t0=q8,#2
vshl.i64 q8,q8,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
# asm 2: vshr.u64 >t3=q13,<r2=q7,#26
vshr.u64 q13,q7,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q7,<mask=q6
vand q9,q7,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
vadd.i64 q5,q5,q13
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q7,<r0=q3,#26
vshr.u64 q7,q3,#26
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >x01=q8,<r0=q3,<mask=q6
vand q8,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
vadd.i64 q3,q10,q7
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q7,<r3=q5,#26
vshr.u64 q7,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
vadd.i64 q10,q4,q7
# qhasm: len -= 32
# asm 1: sub >len=int32#4,<len=int32#4,#32
# asm 2: sub >len=r3,<len=r3,#32
sub r3,r3,#32
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
# asm 2: vtrn.32 <r3=d10,<r3=d11
vtrn.32 d10,d11
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
vext.32 d19,d10,d10,#0
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto mainloop if unsigned>
bhi ._mainloop
# qhasm: end:
._end:
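# Store the reduced accumulator back to the state and return the number
# of unprocessed trailing bytes (at most 32) in r0.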
# qhasm: mem128[input_0] = x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
vst1.8 {d16-d17},[r0]!
# qhasm: mem128[input_0] = x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
vst1.8 {d18-d19},[r0]!
# qhasm: mem64[input_0] = x4[0]
# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
# asm 2: vst1.8 <x4=d20,[<input_0=r0]
vst1.8 d20,[r0]
# qhasm: len = len
# asm 1: mov >len=int32#1,<len=int32#4
# asm 2: mov >len=r0,<len=r3
mov r0,r3
# qhasm: qpopreturn len
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y01
# qhasm: reg128 y23
# qhasm: reg128 y4
# qhasm: reg128 _5y01
# qhasm: reg128 _5y23
# qhasm: reg128 _5y4
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 c4
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod
.align 2
.global openssl_poly1305_neon2_addmulmod
.hidden openssl_poly1305_neon2_addmulmod
.type openssl_poly1305_neon2_addmulmod STT_FUNC
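# openssl_poly1305_neon2_addmulmod(r0, r1, r2, r3) computes, two NEON
# lanes at a time, out = (x + c) * y mod 2^130 - 5, with x read from r1,
# y from r2, c from r3 and the result written through r0 (names editorial;
# the qhasm comments call the arguments input_0..input_3).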
openssl_poly1305_neon2_addmulmod:
sub sp,sp,#0
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff
# asm 2: vmov.i64 >mask=q0,#0xffffffff
vmov.i64 q0,#0xffffffff
# qhasm: y01 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: 4x _5y01 = y01 << 2
# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2
# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2
vshl.i32 q2,q1,#2
# qhasm: y23 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]!
vld1.8 {d6-d7},[r2,: 128]!
# qhasm: 4x _5y23 = y23 << 2
# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2
# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2
vshl.i32 q8,q3,#2
# qhasm: y4 aligned= mem64[input_2]y4[1]
# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64]
# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64]
vld1.8 {d18},[r2,: 64]
# qhasm: 4x _5y4 = y4 << 2
# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2
# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2
vshl.i32 q10,q9,#2
# qhasm: x01 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]!
vld1.8 {d22-d23},[r1,: 128]!
# qhasm: 4x _5y01 += y01
# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2
# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1
vadd.i32 q2,q2,q1
# qhasm: x23 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]!
vld1.8 {d24-d25},[r1,: 128]!
# qhasm: 4x _5y23 += y23
# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4
# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3
vadd.i32 q8,q8,q3
# qhasm: 4x _5y4 += y4
# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10
# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9
vadd.i32 q10,q10,q9
# qhasm: c01 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x01 += c01
# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14
# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13
vadd.i32 q11,q11,q13
# qhasm: c23 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x23 += c23
# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14
# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13
vadd.i32 q12,q12,q13
# qhasm: x4 aligned= mem64[input_1]x4[1]
# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64]
# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64]
vld1.8 {d26},[r1,: 64]
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6
# asm 2: vshr.u64 >mask=q0,<mask=q0,#6
vshr.u64 q0,q0,#6
# qhasm: c4 aligned= mem64[input_3]c4[1]
# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64]
# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64]
vld1.8 {d28},[r3,: 64]
# qhasm: 4x x4 += c4
# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15
# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14
vadd.i32 q13,q13,q14
# qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1]
# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot
# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2
vmull.u32 q14,d22,d2
# qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20
vmlal.u32 q14,d23,d20
# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17
vmlal.u32 q14,d24,d17
# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16
vmlal.u32 q14,d25,d16
# qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top
# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5
vmlal.u32 q14,d26,d5
# qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3]
# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top
# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3
vmull.u32 q2,d22,d3
# qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2
vmlal.u32 q2,d23,d2
# qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20
vmlal.u32 q2,d24,d20
# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17
vmlal.u32 q2,d25,d17
# qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16
vmlal.u32 q2,d26,d16
# qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1]
# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot
# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6
vmull.u32 q15,d22,d6
# qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3
vmlal.u32 q15,d23,d3
# qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2
vmlal.u32 q15,d24,d2
# qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20
vmlal.u32 q15,d25,d20
# qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17
vmlal.u32 q15,d26,d17
# qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3]
# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top
# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7
vmull.u32 q8,d22,d7
# qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6
vmlal.u32 q8,d23,d6
# qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top
# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3
vmlal.u32 q8,d24,d3
# qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2
vmlal.u32 q8,d25,d2
# qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20
vmlal.u32 q8,d26,d20
# qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1]
# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot
# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18
vmull.u32 q9,d22,d18
# qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top
# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7
vmlal.u32 q9,d23,d7
# qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6
vmlal.u32 q9,d24,d6
# qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3
vmlal.u32 q9,d25,d3
# qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2
vmlal.u32 q9,d26,d2
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26
# asm 2: vshr.u64 >t1=q1,<r0=q14,#26
vshr.u64 q1,q14,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1
# asm 2: vand >r0=q3,<r0=q14,<mask=q0
vand q3,q14,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2
# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1
vadd.i64 q1,q2,q1
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q2,<r3=q8,#26
vshr.u64 q2,q8,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q8,<r3=q8,<mask=q0
vand q8,q8,q0
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3
# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2
vadd.i64 q2,q9,q2
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26
# asm 2: vshr.u64 >t2=q9,<r1=q1,#26
vshr.u64 q9,q1,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1
# asm 2: vand >r1=q1,<r1=q1,<mask=q0
vand q1,q1,q0
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26
# asm 2: vshr.u64 >t0=q10,<r4=q2,#26
vshr.u64 q10,q2,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10
# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9
vadd.i64 q9,q15,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1
# asm 2: vand >r4=q2,<r4=q2,<mask=q0
vand q2,q2,q0
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2
# asm 2: vshl.i64 >t0=q10,<t0=q10,#2
vshl.i64 q10,q10,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26
# asm 2: vshr.u64 >t3=q11,<r2=q9,#26
vshr.u64 q11,q9,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1
# asm 2: vand >x23=q9,<r2=q9,<mask=q0
vand q9,q9,q0
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12
# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11
vadd.i64 q8,q8,q11
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q10,<r0=q3,#26
vshr.u64 q10,q3,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1
# asm 2: vand >x01=q3,<r0=q3,<mask=q0
vand q3,q3,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11
# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10
vadd.i64 q1,q1,q10
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q10,<r3=q8,#26
vshr.u64 q10,q8,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top
# asm 2: vtrn.32 <x01=d6,<x01=d7
vtrn.32 d6,d7
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q0,<r3=q8,<mask=q0
vand q0,q8,q0
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top
# asm 2: vtrn.32 <r1=d2,<r1=d3
vtrn.32 d2,d3
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11
# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10
vadd.i64 q2,q2,q10
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top
# asm 2: vtrn.32 <r3=d0,<r3=d1
vtrn.32 d0,d1
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0
# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0
vext.32 d7,d2,d2,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0
vext.32 d19,d0,d0,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top
# asm 2: vtrn.32 <x4=d4,<x4=d5
vtrn.32 d4,d5
# qhasm: mem128[input_0] aligned= x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]!
vst1.8 {d6-d7},[r0,: 128]!
# qhasm: mem128[input_0] aligned= x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]!
vst1.8 {d18-d19},[r0,: 128]!
# qhasm: mem64[input_0] aligned= x4[0]
# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64]
# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64]
vst1.8 d4,[r0,: 64]
# qhasm: return
add sp,sp,#0
bx lr
#endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
|
chairq/First-choice
| 41,612
|
.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.8/crypto/curve25519/asm/x25519-asm-arm.S
|
/* Copyright (c) 2015, Google Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
/* This file is taken from crypto_scalarmult/curve25519/neon2/scalarmult.s in
* SUPERCOP 20141124 (http://bench.cr.yp.to/supercop.html). That code is public
* domain licensed but the standard ISC license is included above to keep
* licensing simple. */
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
.fpu neon
.text
.align 4
.global x25519_NEON
.hidden x25519_NEON
.type x25519_NEON, %function
x25519_NEON:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#736
and sp,sp,#0xffffffe0
strd r4,[sp,#0]
strd r6,[sp,#8]
strd r8,[sp,#16]
strd r10,[sp,#24]
str r12,[sp,#480]
str r14,[sp,#484]
mov r0,r0
mov r1,r1
mov r2,r2
add r3,sp,#32
ldr r4,=0
ldr r5,=254
vmov.i32 q0,#1
vshr.u64 q1,q0,#7
vshr.u64 q0,q0,#8
vmov.i32 d4,#19
vmov.i32 d5,#38
add r6,sp,#512
vst1.8 {d2-d3},[r6,: 128]
add r6,sp,#528
vst1.8 {d0-d1},[r6,: 128]
add r6,sp,#544
vst1.8 {d4-d5},[r6,: 128]
add r6,r3,#0
vmov.i32 q2,#0
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 d4,[r6,: 64]
add r6,r3,#0
ldr r7,=960
sub r7,r7,#2
neg r7,r7
sub r7,r7,r7,LSL #7
str r7,[r6]
add r6,sp,#704
vld1.8 {d4-d5},[r1]!
vld1.8 {d6-d7},[r1]
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 {d6-d7},[r6,: 128]
sub r1,r6,#16
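# clamp the scalar as X25519 requires (RFC 7748): clear the low three
# bits of byte 0, clear the top bit of byte 31 and set its next-highest
# bit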
ldrb r6,[r1]
and r6,r6,#248
strb r6,[r1]
ldrb r6,[r1,#31]
and r6,r6,#127
orr r6,r6,#64
strb r6,[r1,#31]
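# unpack the 32-byte input coordinate into ten limbs in radix 2^25.5
# (alternating 26- and 25-bit limbs); q2 and q3 hold the 2^26-1 and
# 2^25-1 masks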
vmov.i64 q2,#0xffffffff
vshr.u64 q3,q2,#7
vshr.u64 q2,q2,#6
vld1.8 {d8},[r2]
vld1.8 {d10},[r2]
add r2,r2,#6
vld1.8 {d12},[r2]
vld1.8 {d14},[r2]
add r2,r2,#6
vld1.8 {d16},[r2]
add r2,r2,#4
vld1.8 {d18},[r2]
vld1.8 {d20},[r2]
add r2,r2,#6
vld1.8 {d22},[r2]
add r2,r2,#2
vld1.8 {d24},[r2]
vld1.8 {d26},[r2]
vshr.u64 q5,q5,#26
vshr.u64 q6,q6,#3
vshr.u64 q7,q7,#29
vshr.u64 q8,q8,#6
vshr.u64 q10,q10,#25
vshr.u64 q11,q11,#3
vshr.u64 q12,q12,#12
vshr.u64 q13,q13,#38
vand q4,q4,q2
vand q6,q6,q2
vand q8,q8,q2
vand q10,q10,q2
vand q2,q12,q2
vand q5,q5,q3
vand q7,q7,q3
vand q9,q9,q3
vand q11,q11,q3
vand q3,q13,q3
add r2,r3,#48
vadd.i64 q12,q4,q1
vadd.i64 q13,q10,q1
vshr.s64 q12,q12,#26
vshr.s64 q13,q13,#26
vadd.i64 q5,q5,q12
vshl.i64 q12,q12,#26
vadd.i64 q14,q5,q0
vadd.i64 q11,q11,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q11,q0
vsub.i64 q4,q4,q12
vshr.s64 q12,q14,#25
vsub.i64 q10,q10,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q12
vshl.i64 q12,q12,#25
vadd.i64 q14,q6,q1
vadd.i64 q2,q2,q13
vsub.i64 q5,q5,q12
vshr.s64 q12,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q1
vadd.i64 q7,q7,q12
vshl.i64 q12,q12,#26
vadd.i64 q15,q7,q0
vsub.i64 q11,q11,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q12
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q3,q0
vadd.i64 q8,q8,q12
vshl.i64 q12,q12,#25
vadd.i64 q15,q8,q1
add r2,r2,#8
vsub.i64 q2,q2,q13
vshr.s64 q13,q14,#25
vsub.i64 q7,q7,q12
vshr.s64 q12,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q9,q9,q12
vtrn.32 d12,d14
vshl.i64 q12,q12,#26
vtrn.32 d13,d15
vadd.i64 q0,q9,q0
vadd.i64 q4,q4,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q6,q13,#4
vsub.i64 q7,q8,q12
vshr.s64 q0,q0,#25
vadd.i64 q4,q4,q6
vadd.i64 q6,q10,q0
vshl.i64 q0,q0,#25
vadd.i64 q8,q6,q1
vadd.i64 q4,q4,q13
vshl.i64 q10,q13,#25
vadd.i64 q1,q4,q1
vsub.i64 q0,q9,q0
vshr.s64 q8,q8,#26
vsub.i64 q3,q3,q10
vtrn.32 d14,d0
vshr.s64 q1,q1,#26
vtrn.32 d15,d1
vadd.i64 q0,q11,q8
vst1.8 d14,[r2,: 64]
vshl.i64 q7,q8,#26
vadd.i64 q5,q5,q1
vtrn.32 d4,d6
vshl.i64 q1,q1,#26
vtrn.32 d5,d7
vsub.i64 q3,q6,q7
add r2,r2,#16
vsub.i64 q1,q4,q1
vst1.8 d4,[r2,: 64]
vtrn.32 d6,d0
vtrn.32 d7,d1
sub r2,r2,#8
vtrn.32 d2,d10
vtrn.32 d3,d11
vst1.8 d6,[r2,: 64]
sub r2,r2,#24
vst1.8 d2,[r2,: 64]
add r2,r3,#96
vmov.i32 q0,#0
vmov.i64 d2,#0xff
vmov.i64 d3,#0
vshr.u32 q1,q1,#7
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#144
vmov.i32 q0,#0
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#240
vmov.i32 q0,#0
vmov.i64 d2,#0xff
vmov.i64 d3,#0
vshr.u32 q1,q1,#7
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#48
add r6,r3,#192
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r6,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
vst1.8 d4,[r6,: 64]
._mainloop:
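# one Montgomery-ladder step: select bit r5 of the clamped scalar, xor
# it into the running swap flag in r4, and expand the flag into an
# all-ones/all-zeros mask so the two working points can be swapped in
# constant time with the vand/veor sequence below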
mov r2,r5,LSR #3
and r6,r5,#7
ldrb r2,[r1,r2]
mov r2,r2,LSR r6
and r2,r2,#1
str r5,[sp,#488]
eor r4,r4,r2
str r2,[sp,#492]
neg r2,r4
add r4,r3,#96
add r5,r3,#192
add r6,r3,#144
vld1.8 {d8-d9},[r4,: 128]!
add r7,r3,#240
vld1.8 {d10-d11},[r5,: 128]!
veor q6,q4,q5
vld1.8 {d14-d15},[r6,: 128]!
vdup.i32 q8,r2
vld1.8 {d18-d19},[r7,: 128]!
veor q10,q7,q9
vld1.8 {d22-d23},[r4,: 128]!
vand q6,q6,q8
vld1.8 {d24-d25},[r5,: 128]!
vand q10,q10,q8
vld1.8 {d26-d27},[r6,: 128]!
veor q4,q4,q6
vld1.8 {d28-d29},[r7,: 128]!
veor q5,q5,q6
vld1.8 {d0},[r4,: 64]
veor q6,q7,q10
vld1.8 {d2},[r5,: 64]
veor q7,q9,q10
vld1.8 {d4},[r6,: 64]
veor q9,q11,q12
vld1.8 {d6},[r7,: 64]
veor q10,q0,q1
sub r2,r4,#32
vand q9,q9,q8
sub r4,r5,#32
vand q10,q10,q8
sub r5,r6,#32
veor q11,q11,q9
sub r6,r7,#32
veor q0,q0,q10
veor q9,q12,q9
veor q1,q1,q10
veor q10,q13,q14
veor q12,q2,q3
vand q10,q10,q8
vand q8,q12,q8
veor q12,q13,q10
veor q2,q2,q8
veor q10,q14,q10
veor q3,q3,q8
vadd.i32 q8,q4,q6
vsub.i32 q4,q4,q6
vst1.8 {d16-d17},[r2,: 128]!
vadd.i32 q6,q11,q12
vst1.8 {d8-d9},[r5,: 128]!
vsub.i32 q4,q11,q12
vst1.8 {d12-d13},[r2,: 128]!
vadd.i32 q6,q0,q2
vst1.8 {d8-d9},[r5,: 128]!
vsub.i32 q0,q0,q2
vst1.8 d12,[r2,: 64]
vadd.i32 q2,q5,q7
vst1.8 d0,[r5,: 64]
vsub.i32 q0,q5,q7
vst1.8 {d4-d5},[r4,: 128]!
vadd.i32 q2,q9,q10
vst1.8 {d0-d1},[r6,: 128]!
vsub.i32 q0,q9,q10
vst1.8 {d4-d5},[r4,: 128]!
vadd.i32 q2,q1,q3
vst1.8 {d0-d1},[r6,: 128]!
vsub.i32 q0,q1,q3
vst1.8 d4,[r4,: 64]
vst1.8 d0,[r6,: 64]
add r2,sp,#544
add r4,r3,#96
add r5,r3,#144
vld1.8 {d0-d1},[r2,: 128]
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
vzip.i32 q1,q2
vld1.8 {d6-d7},[r4,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
vshl.i32 q5,q1,#1
vzip.i32 q3,q4
vshl.i32 q6,q2,#1
vld1.8 {d14},[r4,: 64]
vshl.i32 q8,q3,#1
vld1.8 {d15},[r5,: 64]
vshl.i32 q9,q4,#1
vmul.i32 d21,d7,d1
vtrn.32 d14,d15
vmul.i32 q11,q4,q0
vmul.i32 q0,q7,q0
vmull.s32 q12,d2,d2
vmlal.s32 q12,d11,d1
vmlal.s32 q12,d12,d0
vmlal.s32 q12,d13,d23
vmlal.s32 q12,d16,d22
vmlal.s32 q12,d7,d21
vmull.s32 q10,d2,d11
vmlal.s32 q10,d4,d1
vmlal.s32 q10,d13,d0
vmlal.s32 q10,d6,d23
vmlal.s32 q10,d17,d22
vmull.s32 q13,d10,d4
vmlal.s32 q13,d11,d3
vmlal.s32 q13,d13,d1
vmlal.s32 q13,d16,d0
vmlal.s32 q13,d17,d23
vmlal.s32 q13,d8,d22
vmull.s32 q1,d10,d5
vmlal.s32 q1,d11,d4
vmlal.s32 q1,d6,d1
vmlal.s32 q1,d17,d0
vmlal.s32 q1,d8,d23
vmull.s32 q14,d10,d6
vmlal.s32 q14,d11,d13
vmlal.s32 q14,d4,d4
vmlal.s32 q14,d17,d1
vmlal.s32 q14,d18,d0
vmlal.s32 q14,d9,d23
vmull.s32 q11,d10,d7
vmlal.s32 q11,d11,d6
vmlal.s32 q11,d12,d5
vmlal.s32 q11,d8,d1
vmlal.s32 q11,d19,d0
vmull.s32 q15,d10,d8
vmlal.s32 q15,d11,d17
vmlal.s32 q15,d12,d6
vmlal.s32 q15,d13,d5
vmlal.s32 q15,d19,d1
vmlal.s32 q15,d14,d0
vmull.s32 q2,d10,d9
vmlal.s32 q2,d11,d8
vmlal.s32 q2,d12,d7
vmlal.s32 q2,d13,d6
vmlal.s32 q2,d14,d1
vmull.s32 q0,d15,d1
vmlal.s32 q0,d10,d14
vmlal.s32 q0,d11,d19
vmlal.s32 q0,d12,d8
vmlal.s32 q0,d13,d17
vmlal.s32 q0,d6,d6
add r2,sp,#512
vld1.8 {d18-d19},[r2,: 128]
vmull.s32 q3,d16,d7
vmlal.s32 q3,d10,d15
vmlal.s32 q3,d11,d14
vmlal.s32 q3,d12,d9
vmlal.s32 q3,d13,d8
add r2,sp,#528
vld1.8 {d8-d9},[r2,: 128]
vadd.i64 q5,q12,q9
vadd.i64 q6,q15,q9
vshr.s64 q5,q5,#26
vshr.s64 q6,q6,#26
vadd.i64 q7,q10,q5
vshl.i64 q5,q5,#26
vadd.i64 q8,q7,q4
vadd.i64 q2,q2,q6
vshl.i64 q6,q6,#26
vadd.i64 q10,q2,q4
vsub.i64 q5,q12,q5
vshr.s64 q8,q8,#25
vsub.i64 q6,q15,q6
vshr.s64 q10,q10,#25
vadd.i64 q12,q13,q8
vshl.i64 q8,q8,#25
vadd.i64 q13,q12,q9
vadd.i64 q0,q0,q10
vsub.i64 q7,q7,q8
vshr.s64 q8,q13,#26
vshl.i64 q10,q10,#25
vadd.i64 q13,q0,q9
vadd.i64 q1,q1,q8
vshl.i64 q8,q8,#26
vadd.i64 q15,q1,q4
vsub.i64 q2,q2,q10
vshr.s64 q10,q13,#26
vsub.i64 q8,q12,q8
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q10
vshl.i64 q10,q10,#26
vadd.i64 q13,q3,q4
vadd.i64 q14,q14,q12
add r2,r3,#288
vshl.i64 q12,q12,#25
add r4,r3,#336
vadd.i64 q15,q14,q9
add r2,r2,#8
vsub.i64 q0,q0,q10
add r4,r4,#8
vshr.s64 q10,q13,#25
vsub.i64 q1,q1,q12
vshr.s64 q12,q15,#26
vadd.i64 q13,q10,q10
vadd.i64 q11,q11,q12
vtrn.32 d16,d2
vshl.i64 q12,q12,#26
vtrn.32 d17,d3
vadd.i64 q1,q11,q4
vadd.i64 q4,q5,q13
vst1.8 d16,[r2,: 64]!
vshl.i64 q5,q10,#4
vst1.8 d17,[r4,: 64]!
vsub.i64 q8,q14,q12
vshr.s64 q1,q1,#25
vadd.i64 q4,q4,q5
vadd.i64 q5,q6,q1
vshl.i64 q1,q1,#25
vadd.i64 q6,q5,q9
vadd.i64 q4,q4,q10
vshl.i64 q10,q10,#25
vadd.i64 q9,q4,q9
vsub.i64 q1,q11,q1
vshr.s64 q6,q6,#26
vsub.i64 q3,q3,q10
vtrn.32 d16,d2
vshr.s64 q9,q9,#26
vtrn.32 d17,d3
vadd.i64 q1,q2,q6
vst1.8 d16,[r2,: 64]
vshl.i64 q2,q6,#26
vst1.8 d17,[r4,: 64]
vadd.i64 q6,q7,q9
vtrn.32 d0,d6
vshl.i64 q7,q9,#26
vtrn.32 d1,d7
vsub.i64 q2,q5,q2
add r2,r2,#16
vsub.i64 q3,q4,q7
vst1.8 d0,[r2,: 64]
add r4,r4,#16
vst1.8 d1,[r4,: 64]
vtrn.32 d4,d2
vtrn.32 d5,d3
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d6,d12
vtrn.32 d7,d13
vst1.8 d4,[r2,: 64]
vst1.8 d5,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d6,[r2,: 64]
vst1.8 d7,[r4,: 64]
add r2,r3,#240
add r4,r3,#96
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#144
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#192
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#144
vshl.i64 q7,q7,#25
add r4,r3,#96
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
add r2,r3,#288
add r4,r3,#336
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vsub.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
vsub.i32 q1,q1,q2
add r5,r3,#240
vld1.8 {d4},[r2,: 64]
vld1.8 {d6},[r4,: 64]
vsub.i32 q2,q2,q3
vst1.8 {d0-d1},[r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
vst1.8 d4,[r5,: 64]
add r2,r3,#144
add r4,r3,#96
add r5,r3,#144
add r6,r3,#192
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vsub.i32 q2,q0,q1
vadd.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
vsub.i32 q4,q1,q3
vadd.i32 q1,q1,q3
vld1.8 {d6},[r2,: 64]
vld1.8 {d10},[r4,: 64]
vsub.i32 q6,q3,q5
vadd.i32 q3,q3,q5
vst1.8 {d4-d5},[r5,: 128]!
vst1.8 {d0-d1},[r6,: 128]!
vst1.8 {d8-d9},[r5,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
vst1.8 d12,[r5,: 64]
vst1.8 d6,[r6,: 64]
add r2,r3,#0
add r4,r3,#240
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#336
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#288
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#288
vshl.i64 q7,q7,#25
add r4,r3,#96
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
add r2,sp,#544
add r4,r3,#144
add r5,r3,#192
vld1.8 {d0-d1},[r2,: 128]
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
vzip.i32 q1,q2
vld1.8 {d6-d7},[r4,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
vshl.i32 q5,q1,#1
vzip.i32 q3,q4
vshl.i32 q6,q2,#1
vld1.8 {d14},[r4,: 64]
vshl.i32 q8,q3,#1
vld1.8 {d15},[r5,: 64]
vshl.i32 q9,q4,#1
vmul.i32 d21,d7,d1
vtrn.32 d14,d15
vmul.i32 q11,q4,q0
vmul.i32 q0,q7,q0
vmull.s32 q12,d2,d2
vmlal.s32 q12,d11,d1
vmlal.s32 q12,d12,d0
vmlal.s32 q12,d13,d23
vmlal.s32 q12,d16,d22
vmlal.s32 q12,d7,d21
vmull.s32 q10,d2,d11
vmlal.s32 q10,d4,d1
vmlal.s32 q10,d13,d0
vmlal.s32 q10,d6,d23
vmlal.s32 q10,d17,d22
vmull.s32 q13,d10,d4
vmlal.s32 q13,d11,d3
vmlal.s32 q13,d13,d1
vmlal.s32 q13,d16,d0
vmlal.s32 q13,d17,d23
vmlal.s32 q13,d8,d22
vmull.s32 q1,d10,d5
vmlal.s32 q1,d11,d4
vmlal.s32 q1,d6,d1
vmlal.s32 q1,d17,d0
vmlal.s32 q1,d8,d23
vmull.s32 q14,d10,d6
vmlal.s32 q14,d11,d13
vmlal.s32 q14,d4,d4
vmlal.s32 q14,d17,d1
vmlal.s32 q14,d18,d0
vmlal.s32 q14,d9,d23
vmull.s32 q11,d10,d7
vmlal.s32 q11,d11,d6
vmlal.s32 q11,d12,d5
vmlal.s32 q11,d8,d1
vmlal.s32 q11,d19,d0
vmull.s32 q15,d10,d8
vmlal.s32 q15,d11,d17
vmlal.s32 q15,d12,d6
vmlal.s32 q15,d13,d5
vmlal.s32 q15,d19,d1
vmlal.s32 q15,d14,d0
vmull.s32 q2,d10,d9
vmlal.s32 q2,d11,d8
vmlal.s32 q2,d12,d7
vmlal.s32 q2,d13,d6
vmlal.s32 q2,d14,d1
vmull.s32 q0,d15,d1
vmlal.s32 q0,d10,d14
vmlal.s32 q0,d11,d19
vmlal.s32 q0,d12,d8
vmlal.s32 q0,d13,d17
vmlal.s32 q0,d6,d6
add r2,sp,#512
vld1.8 {d18-d19},[r2,: 128]
vmull.s32 q3,d16,d7
vmlal.s32 q3,d10,d15
vmlal.s32 q3,d11,d14
vmlal.s32 q3,d12,d9
vmlal.s32 q3,d13,d8
add r2,sp,#528
vld1.8 {d8-d9},[r2,: 128]
vadd.i64 q5,q12,q9
vadd.i64 q6,q15,q9
vshr.s64 q5,q5,#26
vshr.s64 q6,q6,#26
vadd.i64 q7,q10,q5
vshl.i64 q5,q5,#26
vadd.i64 q8,q7,q4
vadd.i64 q2,q2,q6
vshl.i64 q6,q6,#26
vadd.i64 q10,q2,q4
vsub.i64 q5,q12,q5
vshr.s64 q8,q8,#25
vsub.i64 q6,q15,q6
vshr.s64 q10,q10,#25
vadd.i64 q12,q13,q8
vshl.i64 q8,q8,#25
vadd.i64 q13,q12,q9
vadd.i64 q0,q0,q10
vsub.i64 q7,q7,q8
vshr.s64 q8,q13,#26
vshl.i64 q10,q10,#25
vadd.i64 q13,q0,q9
vadd.i64 q1,q1,q8
vshl.i64 q8,q8,#26
vadd.i64 q15,q1,q4
vsub.i64 q2,q2,q10
vshr.s64 q10,q13,#26
vsub.i64 q8,q12,q8
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q10
vshl.i64 q10,q10,#26
vadd.i64 q13,q3,q4
vadd.i64 q14,q14,q12
add r2,r3,#144
vshl.i64 q12,q12,#25
add r4,r3,#192
vadd.i64 q15,q14,q9
add r2,r2,#8
vsub.i64 q0,q0,q10
add r4,r4,#8
vshr.s64 q10,q13,#25
vsub.i64 q1,q1,q12
vshr.s64 q12,q15,#26
vadd.i64 q13,q10,q10
vadd.i64 q11,q11,q12
vtrn.32 d16,d2
vshl.i64 q12,q12,#26
vtrn.32 d17,d3
vadd.i64 q1,q11,q4
vadd.i64 q4,q5,q13
vst1.8 d16,[r2,: 64]!
vshl.i64 q5,q10,#4
vst1.8 d17,[r4,: 64]!
vsub.i64 q8,q14,q12
vshr.s64 q1,q1,#25
vadd.i64 q4,q4,q5
vadd.i64 q5,q6,q1
vshl.i64 q1,q1,#25
vadd.i64 q6,q5,q9
vadd.i64 q4,q4,q10
vshl.i64 q10,q10,#25
vadd.i64 q9,q4,q9
vsub.i64 q1,q11,q1
vshr.s64 q6,q6,#26
vsub.i64 q3,q3,q10
vtrn.32 d16,d2
vshr.s64 q9,q9,#26
vtrn.32 d17,d3
vadd.i64 q1,q2,q6
vst1.8 d16,[r2,: 64]
vshl.i64 q2,q6,#26
vst1.8 d17,[r4,: 64]
vadd.i64 q6,q7,q9
vtrn.32 d0,d6
vshl.i64 q7,q9,#26
vtrn.32 d1,d7
vsub.i64 q2,q5,q2
add r2,r2,#16
vsub.i64 q3,q4,q7
vst1.8 d0,[r2,: 64]
add r4,r4,#16
vst1.8 d1,[r4,: 64]
vtrn.32 d4,d2
vtrn.32 d5,d3
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d6,d12
vtrn.32 d7,d13
vst1.8 d4,[r2,: 64]
vst1.8 d5,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d6,[r2,: 64]
vst1.8 d7,[r4,: 64]
add r2,r3,#336
add r4,r3,#288
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vadd.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
vadd.i32 q1,q1,q2
add r5,r3,#288
vld1.8 {d4},[r2,: 64]
vld1.8 {d6},[r4,: 64]
vadd.i32 q2,q2,q3
vst1.8 {d0-d1},[r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
vst1.8 d4,[r5,: 64]
add r2,r3,#48
add r4,r3,#144
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#288
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#240
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#240
vshl.i64 q7,q7,#25
add r4,r3,#144
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
ldr r2,[sp,#488]
ldr r4,[sp,#492]
subs r5,r2,#1
bge ._mainloop
add r1,r3,#144
add r2,r3,#336
vld1.8 {d0-d1},[r1,: 128]!
vld1.8 {d2-d3},[r1,: 128]!
vld1.8 {d4},[r1,: 64]
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 d4,[r2,: 64]
ldr r1,=0
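# field inversion via Fermat's little theorem, z^(p-2): each pass of the
# loop below performs r5 squarings followed by a multiply; the per-stage
# squaring counts follow the usual curve25519 inversion addition chain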
._invertloop:
add r2,r3,#144
ldr r4,=0
ldr r5,=2
cmp r1,#1
ldreq r5,=1
addeq r2,r3,#336
addeq r4,r3,#48
cmp r1,#2
ldreq r5,=1
addeq r2,r3,#48
cmp r1,#3
ldreq r5,=5
addeq r4,r3,#336
cmp r1,#4
ldreq r5,=10
cmp r1,#5
ldreq r5,=20
cmp r1,#6
ldreq r5,=10
addeq r2,r3,#336
addeq r4,r3,#336
cmp r1,#7
ldreq r5,=50
cmp r1,#8
ldreq r5,=100
cmp r1,#9
ldreq r5,=50
addeq r2,r3,#336
cmp r1,#10
ldreq r5,=5
addeq r2,r3,#48
cmp r1,#11
ldreq r5,=0
addeq r2,r3,#96
add r6,r3,#144
add r7,r3,#288
vld1.8 {d0-d1},[r6,: 128]!
vld1.8 {d2-d3},[r6,: 128]!
vld1.8 {d4},[r6,: 64]
vst1.8 {d0-d1},[r7,: 128]!
vst1.8 {d2-d3},[r7,: 128]!
vst1.8 d4,[r7,: 64]
cmp r5,#0
beq ._skipsquaringloop
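# one field squaring per iteration: the operand pointers (r6, r7) and
# the destination (r8) all refer to the same working buffer at r3+288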
._squaringloop:
add r6,r3,#288
add r7,r3,#288
add r8,r3,#288
vmov.i32 q0,#19
vmov.i32 q1,#0
vmov.i32 q2,#1
vzip.i32 q1,q2
vld1.8 {d4-d5},[r7,: 128]!
vld1.8 {d6-d7},[r7,: 128]!
vld1.8 {d9},[r7,: 64]
vld1.8 {d10-d11},[r6,: 128]!
add r7,sp,#416
vld1.8 {d12-d13},[r6,: 128]!
vmul.i32 q7,q2,q0
vld1.8 {d8},[r6,: 64]
vext.32 d17,d11,d10,#1
vmul.i32 q9,q3,q0
vext.32 d16,d10,d8,#1
vshl.u32 q10,q5,q1
vext.32 d22,d14,d4,#1
vext.32 d24,d18,d6,#1
vshl.u32 q13,q6,q1
vshl.u32 d28,d8,d2
vrev64.i32 d22,d22
vmul.i32 d1,d9,d1
vrev64.i32 d24,d24
vext.32 d29,d8,d13,#1
vext.32 d0,d1,d9,#1
vrev64.i32 d0,d0
vext.32 d2,d9,d1,#1
vext.32 d23,d15,d5,#1
vmull.s32 q4,d20,d4
vrev64.i32 d23,d23
vmlal.s32 q4,d21,d1
vrev64.i32 d2,d2
vmlal.s32 q4,d26,d19
vext.32 d3,d5,d15,#1
vmlal.s32 q4,d27,d18
vrev64.i32 d3,d3
vmlal.s32 q4,d28,d15
vext.32 d14,d12,d11,#1
vmull.s32 q5,d16,d23
vext.32 d15,d13,d12,#1
vmlal.s32 q5,d17,d4
vst1.8 d8,[r7,: 64]!
vmlal.s32 q5,d14,d1
vext.32 d12,d9,d8,#0
vmlal.s32 q5,d15,d19
vmov.i64 d13,#0
vmlal.s32 q5,d29,d18
vext.32 d25,d19,d7,#1
vmlal.s32 q6,d20,d5
vrev64.i32 d25,d25
vmlal.s32 q6,d21,d4
vst1.8 d11,[r7,: 64]!
vmlal.s32 q6,d26,d1
vext.32 d9,d10,d10,#0
vmlal.s32 q6,d27,d19
vmov.i64 d8,#0
vmlal.s32 q6,d28,d18
vmlal.s32 q4,d16,d24
vmlal.s32 q4,d17,d5
vmlal.s32 q4,d14,d4
vst1.8 d12,[r7,: 64]!
vmlal.s32 q4,d15,d1
vext.32 d10,d13,d12,#0
vmlal.s32 q4,d29,d19
vmov.i64 d11,#0
vmlal.s32 q5,d20,d6
vmlal.s32 q5,d21,d5
vmlal.s32 q5,d26,d4
vext.32 d13,d8,d8,#0
vmlal.s32 q5,d27,d1
vmov.i64 d12,#0
vmlal.s32 q5,d28,d19
vst1.8 d9,[r7,: 64]!
vmlal.s32 q6,d16,d25
vmlal.s32 q6,d17,d6
vst1.8 d10,[r7,: 64]
vmlal.s32 q6,d14,d5
vext.32 d8,d11,d10,#0
vmlal.s32 q6,d15,d4
vmov.i64 d9,#0
vmlal.s32 q6,d29,d1
vmlal.s32 q4,d20,d7
vmlal.s32 q4,d21,d6
vmlal.s32 q4,d26,d5
vext.32 d11,d12,d12,#0
vmlal.s32 q4,d27,d4
vmov.i64 d10,#0
vmlal.s32 q4,d28,d1
vmlal.s32 q5,d16,d0
sub r6,r7,#32
vmlal.s32 q5,d17,d7
vmlal.s32 q5,d14,d6
vext.32 d30,d9,d8,#0
vmlal.s32 q5,d15,d5
vld1.8 {d31},[r6,: 64]!
vmlal.s32 q5,d29,d4
vmlal.s32 q15,d20,d0
vext.32 d0,d6,d18,#1
vmlal.s32 q15,d21,d25
vrev64.i32 d0,d0
vmlal.s32 q15,d26,d24
vext.32 d1,d7,d19,#1
vext.32 d7,d10,d10,#0
vmlal.s32 q15,d27,d23
vrev64.i32 d1,d1
vld1.8 {d6},[r6,: 64]
vmlal.s32 q15,d28,d22
vmlal.s32 q3,d16,d4
add r6,r6,#24
vmlal.s32 q3,d17,d2
vext.32 d4,d31,d30,#0
vmov d17,d11
vmlal.s32 q3,d14,d1
vext.32 d11,d13,d13,#0
vext.32 d13,d30,d30,#0
vmlal.s32 q3,d15,d0
vext.32 d1,d8,d8,#0
vmlal.s32 q3,d29,d3
vld1.8 {d5},[r6,: 64]
sub r6,r6,#16
vext.32 d10,d6,d6,#0
vmov.i32 q1,#0xffffffff
vshl.i64 q4,q1,#25
add r7,sp,#512
vld1.8 {d14-d15},[r7,: 128]
vadd.i64 q9,q2,q7
vshl.i64 q1,q1,#26
vshr.s64 q10,q9,#26
vld1.8 {d0},[r6,: 64]!
vadd.i64 q5,q5,q10
vand q9,q9,q1
vld1.8 {d16},[r6,: 64]!
add r6,sp,#528
vld1.8 {d20-d21},[r6,: 128]
vadd.i64 q11,q5,q10
vsub.i64 q2,q2,q9
vshr.s64 q9,q11,#25
vext.32 d12,d5,d4,#0
vand q11,q11,q4
vadd.i64 q0,q0,q9
vmov d19,d7
vadd.i64 q3,q0,q7
vsub.i64 q5,q5,q11
vshr.s64 q11,q3,#26
vext.32 d18,d11,d10,#0
vand q3,q3,q1
vadd.i64 q8,q8,q11
vadd.i64 q11,q8,q10
vsub.i64 q0,q0,q3
vshr.s64 q3,q11,#25
vand q11,q11,q4
vadd.i64 q3,q6,q3
vadd.i64 q6,q3,q7
vsub.i64 q8,q8,q11
vshr.s64 q11,q6,#26
vand q6,q6,q1
vadd.i64 q9,q9,q11
vadd.i64 d25,d19,d21
vsub.i64 q3,q3,q6
vshr.s64 d23,d25,#25
vand q4,q12,q4
vadd.i64 d21,d23,d23
vshl.i64 d25,d23,#4
vadd.i64 d21,d21,d23
vadd.i64 d25,d25,d21
vadd.i64 d4,d4,d25
vzip.i32 q0,q8
vadd.i64 d12,d4,d14
add r6,r8,#8
vst1.8 d0,[r6,: 64]
vsub.i64 d19,d19,d9
add r6,r6,#16
vst1.8 d16,[r6,: 64]
vshr.s64 d22,d12,#26
vand q0,q6,q1
vadd.i64 d10,d10,d22
vzip.i32 q3,q9
vsub.i64 d4,d4,d0
sub r6,r6,#8
vst1.8 d6,[r6,: 64]
add r6,r6,#16
vst1.8 d18,[r6,: 64]
vzip.i32 q2,q5
sub r6,r6,#32
vst1.8 d4,[r6,: 64]
subs r5,r5,#1
bhi ._squaringloop
._skipsquaringloop:
mov r2,r2
add r5,r3,#288
add r6,r3,#144
vmov.i32 q0,#19
vmov.i32 q1,#0
vmov.i32 q2,#1
vzip.i32 q1,q2
vld1.8 {d4-d5},[r5,: 128]!
vld1.8 {d6-d7},[r5,: 128]!
vld1.8 {d9},[r5,: 64]
vld1.8 {d10-d11},[r2,: 128]!
add r5,sp,#416
vld1.8 {d12-d13},[r2,: 128]!
vmul.i32 q7,q2,q0
vld1.8 {d8},[r2,: 64]
vext.32 d17,d11,d10,#1
vmul.i32 q9,q3,q0
vext.32 d16,d10,d8,#1
vshl.u32 q10,q5,q1
vext.32 d22,d14,d4,#1
vext.32 d24,d18,d6,#1
vshl.u32 q13,q6,q1
vshl.u32 d28,d8,d2
vrev64.i32 d22,d22
vmul.i32 d1,d9,d1
vrev64.i32 d24,d24
vext.32 d29,d8,d13,#1
vext.32 d0,d1,d9,#1
vrev64.i32 d0,d0
vext.32 d2,d9,d1,#1
vext.32 d23,d15,d5,#1
vmull.s32 q4,d20,d4
vrev64.i32 d23,d23
vmlal.s32 q4,d21,d1
vrev64.i32 d2,d2
vmlal.s32 q4,d26,d19
vext.32 d3,d5,d15,#1
vmlal.s32 q4,d27,d18
vrev64.i32 d3,d3
vmlal.s32 q4,d28,d15
vext.32 d14,d12,d11,#1
vmull.s32 q5,d16,d23
vext.32 d15,d13,d12,#1
vmlal.s32 q5,d17,d4
vst1.8 d8,[r5,: 64]!
vmlal.s32 q5,d14,d1
vext.32 d12,d9,d8,#0
vmlal.s32 q5,d15,d19
vmov.i64 d13,#0
vmlal.s32 q5,d29,d18
vext.32 d25,d19,d7,#1
vmlal.s32 q6,d20,d5
vrev64.i32 d25,d25
vmlal.s32 q6,d21,d4
vst1.8 d11,[r5,: 64]!
vmlal.s32 q6,d26,d1
vext.32 d9,d10,d10,#0
vmlal.s32 q6,d27,d19
vmov.i64 d8,#0
vmlal.s32 q6,d28,d18
vmlal.s32 q4,d16,d24
vmlal.s32 q4,d17,d5
vmlal.s32 q4,d14,d4
vst1.8 d12,[r5,: 64]!
vmlal.s32 q4,d15,d1
vext.32 d10,d13,d12,#0
vmlal.s32 q4,d29,d19
vmov.i64 d11,#0
vmlal.s32 q5,d20,d6
vmlal.s32 q5,d21,d5
vmlal.s32 q5,d26,d4
vext.32 d13,d8,d8,#0
vmlal.s32 q5,d27,d1
vmov.i64 d12,#0
vmlal.s32 q5,d28,d19
vst1.8 d9,[r5,: 64]!
vmlal.s32 q6,d16,d25
vmlal.s32 q6,d17,d6
vst1.8 d10,[r5,: 64]
vmlal.s32 q6,d14,d5
vext.32 d8,d11,d10,#0
vmlal.s32 q6,d15,d4
vmov.i64 d9,#0
vmlal.s32 q6,d29,d1
vmlal.s32 q4,d20,d7
vmlal.s32 q4,d21,d6
vmlal.s32 q4,d26,d5
vext.32 d11,d12,d12,#0
vmlal.s32 q4,d27,d4
vmov.i64 d10,#0
vmlal.s32 q4,d28,d1
vmlal.s32 q5,d16,d0
sub r2,r5,#32
vmlal.s32 q5,d17,d7
vmlal.s32 q5,d14,d6
vext.32 d30,d9,d8,#0
vmlal.s32 q5,d15,d5
vld1.8 {d31},[r2,: 64]!
vmlal.s32 q5,d29,d4
vmlal.s32 q15,d20,d0
vext.32 d0,d6,d18,#1
vmlal.s32 q15,d21,d25
vrev64.i32 d0,d0
vmlal.s32 q15,d26,d24
vext.32 d1,d7,d19,#1
vext.32 d7,d10,d10,#0
vmlal.s32 q15,d27,d23
vrev64.i32 d1,d1
vld1.8 {d6},[r2,: 64]
vmlal.s32 q15,d28,d22
vmlal.s32 q3,d16,d4
add r2,r2,#24
vmlal.s32 q3,d17,d2
vext.32 d4,d31,d30,#0
vmov d17,d11
vmlal.s32 q3,d14,d1
vext.32 d11,d13,d13,#0
vext.32 d13,d30,d30,#0
vmlal.s32 q3,d15,d0
vext.32 d1,d8,d8,#0
vmlal.s32 q3,d29,d3
vld1.8 {d5},[r2,: 64]
sub r2,r2,#16
vext.32 d10,d6,d6,#0
vmov.i32 q1,#0xffffffff
vshl.i64 q4,q1,#25
add r5,sp,#512
vld1.8 {d14-d15},[r5,: 128]
vadd.i64 q9,q2,q7
vshl.i64 q1,q1,#26
vshr.s64 q10,q9,#26
vld1.8 {d0},[r2,: 64]!
vadd.i64 q5,q5,q10
vand q9,q9,q1
vld1.8 {d16},[r2,: 64]!
add r2,sp,#528
vld1.8 {d20-d21},[r2,: 128]
vadd.i64 q11,q5,q10
vsub.i64 q2,q2,q9
vshr.s64 q9,q11,#25
vext.32 d12,d5,d4,#0
vand q11,q11,q4
vadd.i64 q0,q0,q9
vmov d19,d7
vadd.i64 q3,q0,q7
vsub.i64 q5,q5,q11
vshr.s64 q11,q3,#26
vext.32 d18,d11,d10,#0
vand q3,q3,q1
vadd.i64 q8,q8,q11
vadd.i64 q11,q8,q10
vsub.i64 q0,q0,q3
vshr.s64 q3,q11,#25
vand q11,q11,q4
vadd.i64 q3,q6,q3
vadd.i64 q6,q3,q7
vsub.i64 q8,q8,q11
vshr.s64 q11,q6,#26
vand q6,q6,q1
vadd.i64 q9,q9,q11
vadd.i64 d25,d19,d21
vsub.i64 q3,q3,q6
vshr.s64 d23,d25,#25
vand q4,q12,q4
vadd.i64 d21,d23,d23
vshl.i64 d25,d23,#4
vadd.i64 d21,d21,d23
vadd.i64 d25,d25,d21
vadd.i64 d4,d4,d25
vzip.i32 q0,q8
vadd.i64 d12,d4,d14
add r2,r6,#8
vst1.8 d0,[r2,: 64]
vsub.i64 d19,d19,d9
add r2,r2,#16
vst1.8 d16,[r2,: 64]
vshr.s64 d22,d12,#26
vand q0,q6,q1
vadd.i64 d10,d10,d22
vzip.i32 q3,q9
vsub.i64 d4,d4,d0
sub r2,r2,#8
vst1.8 d6,[r2,: 64]
add r2,r2,#16
vst1.8 d18,[r2,: 64]
vzip.i32 q2,q5
sub r2,r2,#32
vst1.8 d4,[r2,: 64]
cmp r4,#0
beq ._skippostcopy
add r2,r3,#144
mov r4,r4
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
vst1.8 d4,[r4,: 64]
._skippostcopy:
cmp r1,#1
bne ._skipfinalcopy
add r2,r3,#288
add r4,r3,#144
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
vst1.8 d4,[r4,: 64]
._skipfinalcopy:
add r1,r1,#1
cmp r1,#12
blo ._invertloop
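# final freeze and pack: fold the top carry back in multiplied by 19,
# propagate carries through the ten 26/25-bit limbs, then pack them
# into the eight 32-bit words of the output at [r0]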
add r1,r3,#144
ldr r2,[r1],#4
ldr r3,[r1],#4
ldr r4,[r1],#4
ldr r5,[r1],#4
ldr r6,[r1],#4
ldr r7,[r1],#4
ldr r8,[r1],#4
ldr r9,[r1],#4
ldr r10,[r1],#4
ldr r1,[r1]
add r11,r1,r1,LSL #4
add r11,r11,r1,LSL #1
add r11,r11,#16777216
mov r11,r11,ASR #25
add r11,r11,r2
mov r11,r11,ASR #26
add r11,r11,r3
mov r11,r11,ASR #25
add r11,r11,r4
mov r11,r11,ASR #26
add r11,r11,r5
mov r11,r11,ASR #25
add r11,r11,r6
mov r11,r11,ASR #26
add r11,r11,r7
mov r11,r11,ASR #25
add r11,r11,r8
mov r11,r11,ASR #26
add r11,r11,r9
mov r11,r11,ASR #25
add r11,r11,r10
mov r11,r11,ASR #26
add r11,r11,r1
mov r11,r11,ASR #25
add r2,r2,r11
add r2,r2,r11,LSL #1
add r2,r2,r11,LSL #4
mov r11,r2,ASR #26
add r3,r3,r11
sub r2,r2,r11,LSL #26
mov r11,r3,ASR #25
add r4,r4,r11
sub r3,r3,r11,LSL #25
mov r11,r4,ASR #26
add r5,r5,r11
sub r4,r4,r11,LSL #26
mov r11,r5,ASR #25
add r6,r6,r11
sub r5,r5,r11,LSL #25
mov r11,r6,ASR #26
add r7,r7,r11
sub r6,r6,r11,LSL #26
mov r11,r7,ASR #25
add r8,r8,r11
sub r7,r7,r11,LSL #25
mov r11,r8,ASR #26
add r9,r9,r11
sub r8,r8,r11,LSL #26
mov r11,r9,ASR #25
add r10,r10,r11
sub r9,r9,r11,LSL #25
mov r11,r10,ASR #26
add r1,r1,r11
sub r10,r10,r11,LSL #26
mov r11,r1,ASR #25
sub r1,r1,r11,LSL #25
add r2,r2,r3,LSL #26
mov r3,r3,LSR #6
add r3,r3,r4,LSL #19
mov r4,r4,LSR #13
add r4,r4,r5,LSL #13
mov r5,r5,LSR #19
add r5,r5,r6,LSL #6
add r6,r7,r8,LSL #25
mov r7,r8,LSR #7
add r7,r7,r9,LSL #19
mov r8,r9,LSR #13
add r8,r8,r10,LSL #12
mov r9,r10,LSR #20
add r1,r9,r1,LSL #6
str r2,[r0],#4
str r3,[r0],#4
str r4,[r0],#4
str r5,[r0],#4
str r6,[r0],#4
str r7,[r0],#4
str r8,[r0],#4
str r1,[r0]
ldrd r4,[sp,#0]
ldrd r6,[sp,#8]
ldrd r8,[sp,#16]
ldrd r10,[sp,#24]
ldr r12,[sp,#480]
ldr r14,[sp,#484]
ldr r0,=0
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
#endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
|
Chaos2025/openChaos
| 1,598
|
kernel/modules/axhal/linker.lds.S
|
OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
.rodata : ALIGN(4K) {
_srodata = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
. = ALIGN(4K);
_erodata = .;
}
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
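/* Per-CPU data: one copy is loaded at _percpu_start (link address 0),
   then the section is sized to hold %SMP% 64-byte-aligned copies. */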
_percpu_start = .;
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = ALIGN(64);
_percpu_size_aligned = .;
. = _percpu_load_start + _percpu_size_aligned * %SMP%;
}
. = _percpu_start + SIZEOF(.percpu);
_percpu_end = .;
. = ALIGN(4K);
_edata = .;
.bss : ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
|
Chaos2025/openChaos
| 4,307
|
kernel/modules/axhal/src/platform/x86_pc/multiboot.S
|
# Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 510
# 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
Chaos2025/openChaos
| 1,965
|
kernel/modules/axhal/src/platform/x86_pc/ap_start.S
|
# Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0)
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr]
mov eax, [entry_ptr]
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
Chaos2025/openChaos
| 1,672
|
kernel/modules/axhal/src/arch/riscv/trap.S
|
.macro SAVE_REGS, from_user
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv tp, t0
.endif
.endm
.macro RESTORE_REGS, from_user
.if \from_user == 1
LDR gp, sp, 2 // load user gp and tp
LDR t0, sp, 3
STR tp, sp, 3 // save supervisor tp
mv tp, t0
addi t0, sp, {trapframe_size} // put supervisor sp to scratch
csrw sscratch, t0
.endif
LDR t0, sp, 31
LDR t1, sp, 32
csrw sepc, t0
csrw sstatus, t1
POP_GENERAL_REGS
LDR sp, sp, 1 // load sp from tf.regs.sp
.endm
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
// sscratch == 0: trap from S mode
// sscratch != 0: trap from U mode
csrrw sp, sscratch, sp // switch sscratch and sp
bnez sp, .Ltrap_entry_u
csrr sp, sscratch // put supervisor sp back
j .Ltrap_entry_s
.Ltrap_entry_s:
SAVE_REGS 0
mv a0, sp
li a1, 0
call riscv_trap_handler
RESTORE_REGS 0
sret
.Ltrap_entry_u:
SAVE_REGS 1
mv a0, sp
li a1, 1
call riscv_trap_handler
RESTORE_REGS 1
sret
|
Chaos2025/openChaos
| 1,505
|
kernel/modules/axhal/src/arch/x86_64/trap.S
|
.equ NUM_INT, 256
.altmacro
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
# error code pushed by CPU
push \i # interrupt vector
jmp .Ltrap_common
.else
push 0 # fill in error code in TrapFrame
push \i # interrupt vector
jmp .Ltrap_common
.endif
.endm
.macro DEF_TABLE_ENTRY, i
.quad .Ltrap_handler_\i
.endm
.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
DEF_HANDLER %i
.set i, i + 1
.endr
.Ltrap_common:
test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space
jz 1f
swapgs
1:
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbx
push rdx
push rcx
push rax
mov rdi, rsp
call x86_trap_handler
pop rax
pop rcx
pop rdx
pop rbx
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space
jz 2f
swapgs
2:
add rsp, 16 # pop vector, error_code
iretq
.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
DEF_TABLE_ENTRY %i
.set i, i + 1
.endr
|
Chaos2025/openChaos
| 2,415
|
kernel/modules/axhal/src/arch/aarch64/trap.S
|
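// Push a 34-slot trap frame: x0..x29 saved in pairs, then x30 with
// SP_EL0, then ELR_EL1 with SPSR_EL1.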
.macro SAVE_REGS
sub sp, sp, 34 * 8
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
mrs x9, sp_el0
mrs x10, elr_el1
mrs x11, spsr_el1
stp x30, x9, [sp, 30 * 8]
stp x10, x11, [sp, 32 * 8]
.endm
.macro RESTORE_REGS
ldp x10, x11, [sp, 32 * 8]
ldp x30, x9, [sp, 30 * 8]
msr sp_el0, x9
msr elr_el1, x10
msr spsr_el1, x11
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x10, x11, [sp, 10 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, 34 * 8
.endm
.macro INVALID_EXCP, kind, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \kind
mov x2, \source
bl invalid_exception
b .Lexception_return
.endm
.macro HANDLE_SYNC
.p2align 7
SAVE_REGS
mov x0, sp
bl handle_sync_exception
b .Lexception_return
.endm
.macro HANDLE_IRQ
.p2align 7
SAVE_REGS
mov x0, sp
bl handle_irq_exception
b .Lexception_return
.endm
.section .text
.p2align 11
.global exception_vector_base
exception_vector_base:
// current EL, with SP_EL0
INVALID_EXCP 0 0
INVALID_EXCP 1 0
INVALID_EXCP 2 0
INVALID_EXCP 3 0
// current EL, with SP_ELx
HANDLE_SYNC
HANDLE_IRQ
INVALID_EXCP 2 1
INVALID_EXCP 3 1
// lower EL, aarch64
HANDLE_SYNC
HANDLE_IRQ
INVALID_EXCP 2 2
INVALID_EXCP 3 2
// lower EL, aarch32
INVALID_EXCP 0 3
INVALID_EXCP 1 3
INVALID_EXCP 2 3
INVALID_EXCP 3 3
.Lexception_return:
RESTORE_REGS
eret
|
Chaos2025/openChaos
| 2,544
|
kernel/tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s
|
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
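// movz/movk assemble the 48-bit absolute address 16 bits at a time
// (abs_g2 = bits 47:32, abs_g1_nc = bits 31:16, abs_g0_nc = bits 15:0)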
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
cheng-zhangpei/orangeOS
| 2,014
|
os/src/trap/trap.S
|
# os/src/trap/trap.S
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.align 2
# To summarize: this code is the preparation done when the user traps into the
# kernel, i.e. it saves the user context. Three things are saved:
# 1. the general-purpose registers x1~x31
# 2. the CSRs sstatus and sepc (staged through t0 and t1)
# 3. the user stack pointer
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack; before this line sp pointed to the user stack and sscratch to the kernel stack (the reason is explained later)
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
#============================ save the general-purpose registers ============================
sd x1, 1*8(sp) # each slot is 8 bytes because riscv64 registers are 64 bits wide
# skip sp(x2), we will save it later, which is why one slot is left empty here
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
#==================== save the two status CSRs needed to recover from the trap ====================
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# ==================== read the user stack pointer and save it on the kernel stack ====================
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
# ==================== call trap_handler to handle the trap raised from user mode ====================
call trap_handler
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
# this function restores the context of the user program
__restore:
# case1: start running app by __restore
# case2: back to U after handling trap
mv sp, a0
# ==================== pull the three trap-related status registers back off the stack ====================
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# ==================== restore the general-purpose registers ====================
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# ==================== point sp at the user stack and sscratch at the kernel stack ====================
csrrw sp, sscratch, sp
# ==================== switch privilege level ====================
sret
|
CHERIoT-Platform/cheri-rust
| 4,337
|
library/compiler-builtins/compiler-builtins/src/hexagon/dfsqrt.s
|
.text
.global __hexagon_sqrtdf2
.type __hexagon_sqrtdf2,@function
.global __hexagon_sqrt
.type __hexagon_sqrt,@function
.global __qdsp_sqrtdf2 ; .set __qdsp_sqrtdf2, __hexagon_sqrtdf2; .type __qdsp_sqrtdf2,@function
.global __qdsp_sqrt ; .set __qdsp_sqrt, __hexagon_sqrt; .type __qdsp_sqrt,@function
.global __hexagon_fast_sqrtdf2 ; .set __hexagon_fast_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast_sqrtdf2,@function
.global __hexagon_fast_sqrt ; .set __hexagon_fast_sqrt, __hexagon_sqrt; .type __hexagon_fast_sqrt,@function
.global __hexagon_fast2_sqrtdf2 ; .set __hexagon_fast2_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast2_sqrtdf2,@function
.global __hexagon_fast2_sqrt ; .set __hexagon_fast2_sqrt, __hexagon_sqrt; .type __hexagon_fast2_sqrt,@function
.type sqrt,@function
.p2align 5
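// (descriptive header, inferred from the code) double-precision square root:
// seed 1/sqrt with sfinvsqrta, refine it with sfmpy steps, then develop the
// mantissa root through mpyu digit iterations and round; zero, NaN, negative,
// infinite and denormal inputs are routed through .Lsqrt_abnormal.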
__hexagon_sqrtdf2:
__hexagon_sqrt:
{
r15:14 = extractu(r1:0,#23 +1,#52 -23)
r28 = extractu(r1,#11,#52 -32)
r5:4 = combine(##0x3f000004,#1)
}
{
p2 = dfclass(r1:0,#0x02)
p2 = cmp.gt(r1,#-1)
if (!p2.new) jump:nt .Lsqrt_abnormal
r9 = or(r5,r14)
}
.Ldenormal_restart:
{
r11:10 = r1:0
r7,p0 = sfinvsqrta(r9)
r5 = and(r5,#-16)
r3:2 = #0
}
{
r3 += sfmpy(r7,r9):lib
r2 += sfmpy(r7,r5):lib
r6 = r5
r9 = and(r28,#1)
}
{
r6 -= sfmpy(r3,r2):lib
r11 = insert(r4,#11 +1,#52 -32)
p1 = cmp.gtu(r9,#0)
}
{
r3 += sfmpy(r3,r6):lib
r2 += sfmpy(r2,r6):lib
r6 = r5
r9 = mux(p1,#8,#9)
}
{
r6 -= sfmpy(r3,r2):lib
r11:10 = asl(r11:10,r9)
r9 = mux(p1,#3,#2)
}
{
r2 += sfmpy(r2,r6):lib
r15:14 = asl(r11:10,r9)
}
{
r2 = and(r2,##0x007fffff)
}
{
r2 = add(r2,##0x00800000 - 3)
r9 = mux(p1,#7,#8)
}
{
r8 = asl(r2,r9)
r9 = mux(p1,#15-(1+1),#15-(1+0))
}
{
r13:12 = mpyu(r8,r15)
}
{
r1:0 = asl(r11:10,#15)
r15:14 = mpyu(r13,r13)
p1 = cmp.eq(r0,r0)
}
{
r1:0 -= asl(r15:14,#15)
r15:14 = mpyu(r13,r12)
p2 = cmp.eq(r0,r0)
}
{
r1:0 -= lsr(r15:14,#16)
p3 = cmp.eq(r0,r0)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
r9 = add(r9,#16)
r1:0 = asl(r11:10,#31)
}
{
r15:14 = mpyu(r13,r13)
r1:0 -= mpyu(r13,r12)
}
{
r1:0 -= asl(r15:14,#31)
r15:14 = mpyu(r12,r12)
}
{
r1:0 -= lsr(r15:14,#33)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
r9 = add(r9,#16)
r1:0 = asl(r11:10,#47)
}
{
r15:14 = mpyu(r13,r13)
}
{
r1:0 -= asl(r15:14,#47)
r15:14 = mpyu(r13,r12)
}
{
r1:0 -= asl(r15:14,#16)
r15:14 = mpyu(r12,r12)
}
{
r1:0 -= lsr(r15:14,#17)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
}
{
r3:2 = mpyu(r13,r12)
r5:4 = mpyu(r12,r12)
r15:14 = #0
r1:0 = #0
}
{
r3:2 += lsr(r5:4,#33)
r5:4 += asl(r3:2,#33)
p1 = cmp.eq(r0,r0)
}
{
r7:6 = mpyu(r13,r13)
r1:0 = sub(r1:0,r5:4,p1):carry
r9:8 = #1
}
{
r7:6 += lsr(r3:2,#31)
r9:8 += asl(r13:12,#1)
}
{
r15:14 = sub(r11:10,r7:6,p1):carry
r5:4 = sub(r1:0,r9:8,p2):carry
r7:6 = #1
r11:10 = #0
}
{
r3:2 = sub(r15:14,r11:10,p2):carry
r7:6 = add(r13:12,r7:6)
r28 = add(r28,#-0x3ff)
}
{
if (p2) r13:12 = r7:6
if (p2) r1:0 = r5:4
if (p2) r15:14 = r3:2
}
{
r5:4 = sub(r1:0,r9:8,p3):carry
r7:6 = #1
r28 = asr(r28,#1)
}
{
r3:2 = sub(r15:14,r11:10,p3):carry
r7:6 = add(r13:12,r7:6)
}
{
if (p3) r13:12 = r7:6
if (p3) r1:0 = r5:4
r2 = #1
}
{
p0 = cmp.eq(r1:0,r11:10)
if (!p0.new) r12 = or(r12,r2)
r3 = cl0(r13:12)
r28 = add(r28,#-63)
}
{
r1:0 = convert_ud2df(r13:12)
r28 = add(r28,r3)
}
{
r1 += asl(r28,#52 -32)
jumpr r31
}
.Lsqrt_abnormal:
{
p0 = dfclass(r1:0,#0x01)
if (p0.new) jumpr:t r31
}
{
p0 = dfclass(r1:0,#0x10)
if (p0.new) jump:nt .Lsqrt_nan
}
{
p0 = cmp.gt(r1,#-1)
if (!p0.new) jump:nt .Lsqrt_invalid_neg
if (!p0.new) r28 = ##0x7F800001
}
{
p0 = dfclass(r1:0,#0x08)
if (p0.new) jumpr:nt r31
}
{
r1:0 = extractu(r1:0,#52,#0)
}
{
r28 = add(clb(r1:0),#-11)
}
{
r1:0 = asl(r1:0,r28)
r28 = sub(#1,r28)
}
{
r1 = insert(r28,#1,#52 -32)
}
{
r3:2 = extractu(r1:0,#23 +1,#52 -23)
r5 = ##0x3f000004
}
{
r9 = or(r5,r2)
r5 = and(r5,#-16)
jump .Ldenormal_restart
}
.Lsqrt_nan:
{
r28 = convert_df2sf(r1:0)
r1:0 = #-1
jumpr r31
}
.Lsqrt_invalid_neg:
{
r1:0 = convert_sf2df(r28)
jumpr r31
}
.size __hexagon_sqrt,.-__hexagon_sqrt
.size __hexagon_sqrtdf2,.-__hexagon_sqrtdf2
|
CHERIoT-Platform/cheri-rust
| 3,885
|
library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_ldlib_asm.s
|
.text
.global __hexagon_fast2ldadd_asm
.type __hexagon_fast2ldadd_asm, @function
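// (inferred) fast non-IEEE long-double add: each operand is a stack pair of
// { 64-bit mantissa, 32-bit exponent } at r29+#0/+#8 and r29+#16/+#24; the
// exponents are aligned, the mantissas added, the sum renormalized with clb,
// and the result stored through the pointer passed in r0.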
__hexagon_fast2ldadd_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = MIN(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = add(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = ASL(R1:0, R4)
if(p0) jump .Ldenorma1
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma1:
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
.global __hexagon_fast2ldsub_asm
.type __hexagon_fast2ldsub_asm, @function
__hexagon_fast2ldsub_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = min(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = sub(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = asl(R1:0, R4)
if(p0) jump .Ldenorma_s
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma_s:
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
.global __hexagon_fast2ldmpy_asm
.type __hexagon_fast2ldmpy_asm, @function
__hexagon_fast2ldmpy_asm:
.falign
{
R15:14 = memd(r29+#0)
R3:2 = memd(r29+#16)
R13:12 = #0
}
{
R8= extractu(R2, #31, #1)
R9= extractu(R14, #31, #1)
R13.H = #0x8000
}
{
R11:10 = mpy(R15, R3)
R7:6 = mpy(R15, R8)
R4 = memw(r29+#8)
R5 = memw(r29+#24)
}
{
R11:10 = add(R11:10, R11:10)
R7:6 += mpy(R3, R9)
}
{
R7:6 = asr(R7:6, #30)
R8.L = #0x0001
p1 = cmp.eq(R15:14, R3:2)
}
{
R7:6 = add(R7:6, R11:10)
R4= add(R4, R5)
p2 = cmp.eq(R3:2, R13:12)
}
{
R9 = clb(R7:6)
R8.H = #0x8000
p1 = and(p1, p2)
}
{
R4-= add(R9, #-1)
R9 = add(R9, #-1)
if(p1) jump .Lsat1
}
{
R7:6 = asl(R7:6, R9)
memw(R0+#8) = R4
p0 = cmp.gt(R9, #58)
if(p0.new) jump:NT .Ldenorm1
}
{
memd(R0+#0) = R7:6
jumpr r31
}
.Lsat1:
{
R13:12 = #0
R4+= add(R9, #1)
}
{
R13.H = #0x4000
memw(R0+#8) = R4
}
{
memd(R0+#0) = R13:12
jumpr r31
}
.Ldenorm1:
{
memw(R0+#8) = R8
R15:14 = #0
}
{
memd(R0+#0) = R15:14
jumpr r31
}
|
CHERIoT-Platform/cheri-rust
| 4,378
|
library/compiler-builtins/compiler-builtins/src/hexagon/dfmul.s
|
.text
.global __hexagon_muldf3
.type __hexagon_muldf3,@function
.global __qdsp_muldf3 ; .set __qdsp_muldf3, __hexagon_muldf3
.global __hexagon_fast_muldf3 ; .set __hexagon_fast_muldf3, __hexagon_muldf3
.global __hexagon_fast2_muldf3 ; .set __hexagon_fast2_muldf3, __hexagon_muldf3
.p2align 5
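// (inferred) IEEE double multiply: the wide mantissa product is built from
// mpyu partial products, the discarded low half is folded into a sticky bit,
// and the result rounded via convert_d2df; overflow/underflow is handled at
// .Lmul_ovf_unf, zero/inf/NaN/denormal operands at .Lmul_abnormal.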
__hexagon_muldf3:
{
p0 = dfclass(r1:0,#2)
p0 = dfclass(r3:2,#2)
r13:12 = combine(##0x40000000,#0)
}
{
r13:12 = insert(r1:0,#52,#11 -1)
r5:4 = asl(r3:2,#11 -1)
r28 = #-1024
r9:8 = #1
}
{
r7:6 = mpyu(r4,r13)
r5:4 = insert(r9:8,#2,#62)
}
{
r15:14 = mpyu(r12,r4)
r7:6 += mpyu(r12,r5)
}
{
r7:6 += lsr(r15:14,#32)
r11:10 = mpyu(r13,r5)
r5:4 = combine(##1024 +1024 -4,#0)
}
{
r11:10 += lsr(r7:6,#32)
if (!p0) jump .Lmul_abnormal
p1 = cmp.eq(r14,#0)
p1 = cmp.eq(r6,#0)
}
{
if (!p1) r10 = or(r10,r8)
r6 = extractu(r1,#11,#20)
r7 = extractu(r3,#11,#20)
}
{
r15:14 = neg(r11:10)
r6 += add(r28,r7)
r28 = xor(r1,r3)
}
{
if (!p2.new) r11:10 = r15:14
p2 = cmp.gt(r28,#-1)
p0 = !cmp.gt(r6,r5)
p0 = cmp.gt(r6,r4)
if (!p0.new) jump:nt .Lmul_ovf_unf
}
{
r1:0 = convert_d2df(r11:10)
r6 = add(r6,#-1024 -58)
}
{
r1 += asl(r6,#20)
jumpr r31
}
.falign
.Lpossible_unf1:
{
p0 = cmp.eq(r0,#0)
p0 = bitsclr(r1,r4)
if (!p0.new) jumpr:t r31
r5 = #0x7fff
}
{
p0 = bitsset(r13,r5)
r4 = USR
r5 = #0x030
}
{
if (p0) r4 = or(r4,r5)
}
{
USR = r4
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.falign
.Lmul_ovf_unf:
{
r1:0 = convert_d2df(r11:10)
r13:12 = abs(r11:10)
r7 = add(r6,#-1024 -58)
}
{
r1 += asl(r7,#20)
r7 = extractu(r1,#11,#20)
r4 = ##0x7FEFFFFF
}
{
r7 += add(r6,##-1024 -58)
r5 = #0
}
{
p0 = cmp.gt(r7,##1024 +1024 -2)
if (p0.new) jump:nt .Lmul_ovf
}
{
p0 = cmp.gt(r7,#0)
if (p0.new) jump:nt .Lpossible_unf1
r5 = sub(r6,r5)
r28 = #63
}
{
r4 = #0
r5 = sub(#5,r5)
}
{
p3 = cmp.gt(r11,#-1)
r5 = min(r5,r28)
r11:10 = r13:12
}
{
r28 = USR
r15:14 = extractu(r11:10,r5:4)
}
{
r11:10 = asr(r11:10,r5)
r4 = #0x0030
r1 = insert(r9,#11,#20)
}
{
p0 = cmp.gtu(r9:8,r15:14)
if (!p0.new) r10 = or(r10,r8)
r11 = setbit(r11,#20 +3)
}
{
r15:14 = neg(r11:10)
p1 = bitsclr(r10,#0x7)
if (!p1.new) r28 = or(r4,r28)
}
{
if (!p3) r11:10 = r15:14
USR = r28
}
{
r1:0 = convert_d2df(r11:10)
p0 = dfcmp.eq(r1:0,r1:0)
}
{
r1 = insert(r9,#11 -1,#20 +1)
jumpr r31
}
.falign
.Lmul_ovf:
{
r28 = USR
r13:12 = combine(##0x7fefffff,#-1)
r1:0 = r11:10
}
{
r14 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
r5:4 = combine(##0x7ff00000,#0)
}
{
USR = r28
r14 ^= lsr(r1,#31)
r28 = r14
}
{
p0 = !cmp.eq(r28,#1)
p0 = !cmp.eq(r14,#2)
if (p0.new) r13:12 = r5:4
p0 = dfcmp.eq(r1:0,r1:0)
}
{
r1:0 = insert(r13:12,#63,#0)
jumpr r31
}
.Lmul_abnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r5:4 = extractu(r3:2,#63,#0)
}
{
p3 = cmp.gtu(r13:12,r5:4)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Linvalid_nan
if (!p3) r13:12 = r5:4
if (!p3) r5:4 = r13:12
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x0e)
}
{
p0 = dfclass(r1:0,#0x08)
p0 = dfclass(r3:2,#0x01)
}
{
if (p1) jump .Ltrue_inf
p2 = dfclass(r3:2,#0x01)
}
{
if (p0) jump .Linvalid_zeroinf
if (p2) jump .Ltrue_zero
r28 = ##0x7c000000
}
{
p0 = bitsclr(r1,r28)
if (p0.new) jump:nt .Lmul_tiny
}
{
r28 = cl0(r5:4)
}
{
r28 = add(r28,#-11)
}
{
r5:4 = asl(r5:4,r28)
}
{
r3:2 = insert(r5:4,#63,#0)
r1 -= asl(r28,#20)
}
jump __hexagon_muldf3
.Lmul_tiny:
{
r28 = USR
r1:0 = xor(r1:0,r3:2)
}
{
r28 = or(r28,#0x30)
r1:0 = insert(r9:8,#63,#0)
r5 = extractu(r28,#2,#22)
}
{
USR = r28
p0 = cmp.gt(r5,#1)
if (!p0.new) r0 = #0
r5 ^= lsr(r1,#31)
}
{
p0 = cmp.eq(r5,#3)
if (!p0.new) r0 = #0
jumpr r31
}
.Linvalid_zeroinf:
{
r28 = USR
}
{
r1:0 = #-1
r28 = or(r28,#2)
}
{
USR = r28
}
{
p0 = dfcmp.uo(r1:0,r1:0)
jumpr r31
}
.Linvalid_nan:
{
p0 = dfclass(r3:2,#0x0f)
r28 = convert_df2sf(r1:0)
if (p0.new) r3:2 = r1:0
}
{
r2 = convert_df2sf(r3:2)
r1:0 = #-1
jumpr r31
}
.falign
.Ltrue_zero:
{
r1:0 = r3:2
r3:2 = r1:0
}
.Ltrue_inf:
{
r3 = extract(r3,#1,#31)
}
{
r1 ^= asl(r3,#31)
jumpr r31
}
.size __hexagon_muldf3,.-__hexagon_muldf3
|
CHERIoT-Platform/cheri-rust
| 7,236
|
library/compiler-builtins/compiler-builtins/src/hexagon/dffma.s
|
.text
.global __hexagon_fmadf4
.type __hexagon_fmadf4,@function
.global __hexagon_fmadf5
.type __hexagon_fmadf5,@function
.global __qdsp_fmadf5 ; .set __qdsp_fmadf5, __hexagon_fmadf5
.p2align 5
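// (inferred) fused multiply-add a*b + c: the full double-width product of a
// and b is formed, c is aligned against it and added with carry chains, and
// the sum is renormalized with clb and rounded once at the end; abnormal a/b
// divert to .Lfma_abnormal_ab and an abnormal c to .Lfma_abnormal_c.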
__hexagon_fmadf4:
__hexagon_fmadf5:
fma:
{
p0 = dfclass(r1:0,#2)
p0 = dfclass(r3:2,#2)
r13:12 = #0
r15:14 = #0
}
{
r13:12 = insert(r1:0,#52,#11 -3)
r15:14 = insert(r3:2,#52,#11 -3)
r7 = ##0x10000000
allocframe(#32)
}
{
r9:8 = mpyu(r12,r14)
if (!p0) jump .Lfma_abnormal_ab
r13 = or(r13,r7)
r15 = or(r15,r7)
}
{
p0 = dfclass(r5:4,#2)
if (!p0.new) jump:nt .Lfma_abnormal_c
r11:10 = combine(r7,#0)
r7:6 = combine(#0,r9)
}
.Lfma_abnormal_c_restart:
{
r7:6 += mpyu(r14,r13)
r11:10 = insert(r5:4,#52,#11 -3)
memd(r29+#0) = r17:16
memd(r29+#8) = r19:18
}
{
r7:6 += mpyu(r12,r15)
r19:18 = neg(r11:10)
p0 = cmp.gt(r5,#-1)
r28 = xor(r1,r3)
}
{
r18 = extractu(r1,#11,#20)
r19 = extractu(r3,#11,#20)
r17:16 = combine(#0,r7)
if (!p0) r11:10 = r19:18
}
{
r17:16 += mpyu(r13,r15)
r9:8 = combine(r6,r8)
r18 = add(r18,r19)
r19 = extractu(r5,#11,#20)
}
{
r18 = add(r18,#-1023 +(4))
p3 = !cmp.gt(r28,#-1)
r7:6 = #0
r15:14 = #0
}
{
r7:6 = sub(r7:6,r9:8,p3):carry
p0 = !cmp.gt(r28,#-1)
p1 = cmp.gt(r19,r18)
if (p1.new) r19:18 = combine(r18,r19)
}
{
r15:14 = sub(r15:14,r17:16,p3):carry
if (p0) r9:8 = r7:6
r7:6 = #0
r19 = sub(r18,r19)
}
{
if (p0) r17:16 = r15:14
p0 = cmp.gt(r19,#63)
if (p1) r9:8 = r7:6
if (p1) r7:6 = r9:8
}
{
if (p1) r17:16 = r11:10
if (p1) r11:10 = r17:16
if (p0) r19 = add(r19,#-64)
r28 = #63
}
{
if (p0) r7:6 = r11:10
r28 = asr(r11,#31)
r13 = min(r19,r28)
r12 = #0
}
{
if (p0) r11:10 = combine(r28,r28)
r5:4 = extract(r7:6,r13:12)
r7:6 = lsr(r7:6,r13)
r12 = sub(#64,r13)
}
{
r15:14 = #0
r28 = #-2
r7:6 |= lsl(r11:10,r12)
r11:10 = asr(r11:10,r13)
}
{
p3 = cmp.gtu(r5:4,r15:14)
if (p3.new) r6 = and(r6,r28)
r15:14 = #1
r5:4 = #0
}
{
r9:8 = add(r7:6,r9:8,p3):carry
}
{
r17:16 = add(r11:10,r17:16,p3):carry
r28 = #62
}
{
r12 = add(clb(r17:16),#-2)
if (!cmp.eq(r12.new,r28)) jump:t 1f
}
{
r11:10 = extractu(r9:8,#62,#2)
r9:8 = asl(r9:8,#62)
r18 = add(r18,#-62)
}
{
r17:16 = insert(r11:10,#62,#0)
}
{
r12 = add(clb(r17:16),#-2)
}
.falign
1:
{
r11:10 = asl(r17:16,r12)
r5:4 |= asl(r9:8,r12)
r13 = sub(#64,r12)
r18 = sub(r18,r12)
}
{
r11:10 |= lsr(r9:8,r13)
p2 = cmp.gtu(r15:14,r5:4)
r28 = #1023 +1023 -2
}
{
if (!p2) r10 = or(r10,r14)
p0 = !cmp.gt(r18,r28)
p0 = cmp.gt(r18,#1)
if (!p0.new) jump:nt .Lfma_ovf_unf
}
{
p0 = cmp.gtu(r15:14,r11:10)
r1:0 = convert_d2df(r11:10)
r18 = add(r18,#-1023 -60)
r17:16 = memd(r29+#0)
}
{
r1 += asl(r18,#20)
r19:18 = memd(r29+#8)
if (!p0) dealloc_return
}
.Ladd_yields_zero:
{
r28 = USR
r1:0 = #0
}
{
r28 = extractu(r28,#2,#22)
r17:16 = memd(r29+#0)
r19:18 = memd(r29+#8)
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
dealloc_return
}
.Lfma_ovf_unf:
{
p0 = cmp.gtu(r15:14,r11:10)
if (p0.new) jump:nt .Ladd_yields_zero
}
{
r1:0 = convert_d2df(r11:10)
r18 = add(r18,#-1023 -60)
r28 = r18
}
{
r1 += asl(r18,#20)
r7 = extractu(r1,#11,#20)
}
{
r6 = add(r18,r7)
r17:16 = memd(r29+#0)
r19:18 = memd(r29+#8)
r9:8 = abs(r11:10)
}
{
p0 = cmp.gt(r6,##1023 +1023)
if (p0.new) jump:nt .Lfma_ovf
}
{
p0 = cmp.gt(r6,#0)
if (p0.new) jump:nt .Lpossible_unf0
}
{
r7 = add(clb(r9:8),#-2)
r6 = sub(#1+5,r28)
p3 = cmp.gt(r11,#-1)
}
{
r6 = add(r6,r7)
r9:8 = asl(r9:8,r7)
r1 = USR
r28 = #63
}
{
r7 = min(r6,r28)
r6 = #0
r0 = #0x0030
}
{
r3:2 = extractu(r9:8,r7:6)
r9:8 = asr(r9:8,r7)
}
{
p0 = cmp.gtu(r15:14,r3:2)
if (!p0.new) r8 = or(r8,r14)
r9 = setbit(r9,#20 +3)
}
{
r11:10 = neg(r9:8)
p1 = bitsclr(r8,#(1<<3)-1)
if (!p1.new) r1 = or(r1,r0)
r3:2 = #0
}
{
if (p3) r11:10 = r9:8
USR = r1
r28 = #-1023 -(52 +3)
}
{
r1:0 = convert_d2df(r11:10)
}
{
r1 += asl(r28,#20)
dealloc_return
}
.Lpossible_unf0:
{
r28 = ##0x7fefffff
r9:8 = abs(r11:10)
}
{
p0 = cmp.eq(r0,#0)
p0 = bitsclr(r1,r28)
if (!p0.new) dealloc_return:t
r28 = #0x7fff
}
{
p0 = bitsset(r9,r28)
r3 = USR
r2 = #0x0030
}
{
if (p0) r3 = or(r3,r2)
}
{
USR = r3
}
{
p0 = dfcmp.eq(r1:0,r1:0)
dealloc_return
}
.Lfma_ovf:
{
r28 = USR
r11:10 = combine(##0x7fefffff,#-1)
r1:0 = r11:10
}
{
r9:8 = combine(##0x7ff00000,#0)
r3 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
}
{
USR = r28
r3 ^= lsr(r1,#31)
r2 = r3
}
{
p0 = !cmp.eq(r2,#1)
p0 = !cmp.eq(r3,#2)
}
{
p0 = dfcmp.eq(r9:8,r9:8)
if (p0.new) r11:10 = r9:8
}
{
r1:0 = insert(r11:10,#63,#0)
dealloc_return
}
.Lfma_abnormal_ab:
{
r9:8 = extractu(r1:0,#63,#0)
r11:10 = extractu(r3:2,#63,#0)
deallocframe
}
{
p3 = cmp.gtu(r9:8,r11:10)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Lnan
if (!p3) r9:8 = r11:10
if (!p3) r11:10 = r9:8
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x0e)
}
{
p0 = dfclass(r1:0,#0x08)
p0 = dfclass(r3:2,#0x01)
}
{
if (p1) jump .Lab_inf
p2 = dfclass(r3:2,#0x01)
}
{
if (p0) jump .Linvalid
if (p2) jump .Lab_true_zero
r28 = ##0x7c000000
}
{
p0 = bitsclr(r1,r28)
if (p0.new) jump:nt .Lfma_ab_tiny
}
{
r28 = add(clb(r11:10),#-11)
}
{
r11:10 = asl(r11:10,r28)
}
{
r3:2 = insert(r11:10,#63,#0)
r1 -= asl(r28,#20)
}
jump fma
.Lfma_ab_tiny:
r9:8 = combine(##0x00100000,#0)
{
r1:0 = insert(r9:8,#63,#0)
r3:2 = insert(r9:8,#63,#0)
}
jump fma
.Lab_inf:
{
r3:2 = lsr(r3:2,#63)
p0 = dfclass(r5:4,#0x10)
}
{
r1:0 ^= asl(r3:2,#63)
if (p0) jump .Lnan
}
{
p1 = dfclass(r5:4,#0x08)
if (p1.new) jump:nt .Lfma_inf_plus_inf
}
{
jumpr r31
}
.falign
.Lfma_inf_plus_inf:
{
p0 = dfcmp.eq(r1:0,r5:4)
if (!p0.new) jump:nt .Linvalid
}
{
jumpr r31
}
.Lnan:
{
p0 = dfclass(r3:2,#0x10)
p1 = dfclass(r5:4,#0x10)
if (!p0.new) r3:2 = r1:0
if (!p1.new) r5:4 = r1:0
}
{
r3 = convert_df2sf(r3:2)
r2 = convert_df2sf(r5:4)
}
{
r3 = convert_df2sf(r1:0)
r1:0 = #-1
jumpr r31
}
.Linvalid:
{
r28 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r28)
jumpr r31
}
.Lab_true_zero:
{
p0 = dfclass(r5:4,#0x10)
if (p0.new) jump:nt .Lnan
if (p0.new) r1:0 = r5:4
}
{
p0 = dfcmp.eq(r3:2,r5:4)
r1 = lsr(r1,#31)
}
{
r3 ^= asl(r1,#31)
if (!p0) r1:0 = r5:4
if (!p0) jumpr r31
}
{
p0 = cmp.eq(r3:2,r5:4)
if (p0.new) jumpr:t r31
r1:0 = r3:2
}
{
r28 = USR
}
{
r28 = extractu(r28,#2,#22)
r1:0 = #0
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
jumpr r31
}
.falign
.Lfma_abnormal_c:
{
p0 = dfclass(r5:4,#0x10)
if (p0.new) jump:nt .Lnan
if (p0.new) r1:0 = r5:4
deallocframe
}
{
p0 = dfclass(r5:4,#0x08)
if (p0.new) r1:0 = r5:4
if (p0.new) jumpr:nt r31
}
{
p0 = dfclass(r5:4,#0x01)
if (p0.new) jump:nt __hexagon_muldf3
r28 = #1
}
{
allocframe(#32)
r11:10 = #0
r5 = insert(r28,#11,#20)
jump .Lfma_abnormal_c_restart
}
.size fma,.-fma
|
CHERIoT-Platform/cheri-rust
| 4,801
|
library/compiler-builtins/compiler-builtins/src/hexagon/dfaddsub.s
|
.text
.global __hexagon_adddf3
.global __hexagon_subdf3
.type __hexagon_adddf3, @function
.type __hexagon_subdf3, @function
.global __qdsp_adddf3 ; .set __qdsp_adddf3, __hexagon_adddf3
.global __hexagon_fast_adddf3 ; .set __hexagon_fast_adddf3, __hexagon_adddf3
.global __hexagon_fast2_adddf3 ; .set __hexagon_fast2_adddf3, __hexagon_adddf3
.global __qdsp_subdf3 ; .set __qdsp_subdf3, __hexagon_subdf3
.global __hexagon_fast_subdf3 ; .set __hexagon_fast_subdf3, __hexagon_subdf3
.global __hexagon_fast2_subdf3 ; .set __hexagon_fast2_subdf3, __hexagon_subdf3
.p2align 5
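// (inferred) IEEE double add: the operands are ordered by magnitude, the
// smaller mantissa is shifted right with the shifted-out bits collected as a
// sticky bit, the mantissas are combined, and the result rounded through
// convert_d2df; __hexagon_subdf3 below just flips the second operand's sign.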
__hexagon_adddf3:
{
r4 = extractu(r1,#11,#20)
r5 = extractu(r3,#11,#20)
r13:12 = combine(##0x20000000,#0)
}
{
p3 = dfclass(r1:0,#2)
p3 = dfclass(r3:2,#2)
r9:8 = r13:12
p2 = cmp.gtu(r5,r4)
}
{
if (!p3) jump .Ladd_abnormal
if (p2) r1:0 = r3:2
if (p2) r3:2 = r1:0
if (p2) r5:4 = combine(r4,r5)
}
{
r13:12 = insert(r1:0,#52,#11 -2)
r9:8 = insert(r3:2,#52,#11 -2)
r15 = sub(r4,r5)
r7:6 = combine(#62,#1)
}
.Ladd_continue:
{
r15 = min(r15,r7)
r11:10 = neg(r13:12)
p2 = cmp.gt(r1,#-1)
r14 = #0
}
{
if (!p2) r13:12 = r11:10
r11:10 = extractu(r9:8,r15:14)
r9:8 = ASR(r9:8,r15)
r15:14 = #0
}
{
p1 = cmp.eq(r11:10,r15:14)
if (!p1.new) r8 = or(r8,r6)
r5 = add(r4,#-1024 -60)
p3 = cmp.gt(r3,#-1)
}
{
r13:12 = add(r13:12,r9:8)
r11:10 = sub(r13:12,r9:8)
r7:6 = combine(#54,##2045)
}
{
p0 = cmp.gtu(r4,r7)
p0 = !cmp.gtu(r4,r6)
if (!p0.new) jump:nt .Ladd_ovf_unf
if (!p3) r13:12 = r11:10
}
{
r1:0 = convert_d2df(r13:12)
p0 = cmp.eq(r13,#0)
p0 = cmp.eq(r12,#0)
if (p0.new) jump:nt .Ladd_zero
}
{
r1 += asl(r5,#20)
jumpr r31
}
.falign
__hexagon_subdf3:
{
r3 = togglebit(r3,#31)
jump __qdsp_adddf3
}
.falign
.Ladd_zero:
{
r28 = USR
r1:0 = #0
r3 = #1
}
{
r28 = extractu(r28,#2,#22)
r3 = asl(r3,#31)
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = xor(r1,r3)
jumpr r31
}
.falign
.Ladd_ovf_unf:
{
r1:0 = convert_d2df(r13:12)
p0 = cmp.eq(r13,#0)
p0 = cmp.eq(r12,#0)
if (p0.new) jump:nt .Ladd_zero
}
{
r28 = extractu(r1,#11,#20)
r1 += asl(r5,#20)
}
{
r5 = add(r5,r28)
r3:2 = combine(##0x00100000,#0)
}
{
p0 = cmp.gt(r5,##1024 +1024 -2)
if (p0.new) jump:nt .Ladd_ovf
}
{
p0 = cmp.gt(r5,#0)
if (p0.new) jumpr:t r31
r28 = sub(#1,r5)
}
{
r3:2 = insert(r1:0,#52,#0)
r1:0 = r13:12
}
{
r3:2 = lsr(r3:2,r28)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.falign
.Ladd_ovf:
{
r1:0 = r13:12
r28 = USR
r13:12 = combine(##0x7fefffff,#-1)
}
{
r5 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
r9:8 = combine(##0x7ff00000,#0)
}
{
USR = r28
r5 ^= lsr(r1,#31)
r28 = r5
}
{
p0 = !cmp.eq(r28,#1)
p0 = !cmp.eq(r5,#2)
if (p0.new) r13:12 = r9:8
}
{
r1:0 = insert(r13:12,#63,#0)
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.Ladd_abnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r9:8 = extractu(r3:2,#63,#0)
}
{
p3 = cmp.gtu(r13:12,r9:8)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Linvalid_nan_add
if (!p3) r13:12 = r9:8
if (!p3) r9:8 = r13:12
}
{
p1 = dfclass(r1:0,#0x08)
if (p1.new) jump:nt .Linf_add
}
{
p2 = dfclass(r3:2,#0x01)
if (p2.new) jump:nt .LB_zero
r13:12 = #0
}
{
p0 = dfclass(r1:0,#4)
if (p0.new) jump:nt .Ladd_two_subnormal
r13:12 = combine(##0x20000000,#0)
}
{
r4 = extractu(r1,#11,#20)
r5 = #1
r9:8 = asl(r9:8,#11 -2)
}
{
r13:12 = insert(r1:0,#52,#11 -2)
r15 = sub(r4,r5)
r7:6 = combine(#62,#1)
jump .Ladd_continue
}
.Ladd_two_subnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r9:8 = extractu(r3:2,#63,#0)
}
{
r13:12 = neg(r13:12)
r9:8 = neg(r9:8)
p0 = cmp.gt(r1,#-1)
p1 = cmp.gt(r3,#-1)
}
{
if (p0) r13:12 = r1:0
if (p1) r9:8 = r3:2
}
{
r13:12 = add(r13:12,r9:8)
}
{
r9:8 = neg(r13:12)
p0 = cmp.gt(r13,#-1)
r3:2 = #0
}
{
if (!p0) r1:0 = r9:8
if (p0) r1:0 = r13:12
r3 = ##0x80000000
}
{
if (!p0) r1 = or(r1,r3)
p0 = dfcmp.eq(r1:0,r3:2)
if (p0.new) jump:nt .Lzero_plus_zero
}
{
jumpr r31
}
.Linvalid_nan_add:
{
r28 = convert_df2sf(r1:0)
p0 = dfclass(r3:2,#0x0f)
if (p0.new) r3:2 = r1:0
}
{
r2 = convert_df2sf(r3:2)
r1:0 = #-1
jumpr r31
}
.falign
.LB_zero:
{
p0 = dfcmp.eq(r13:12,r1:0)
if (!p0.new) jumpr:t r31
}
.Lzero_plus_zero:
{
p0 = cmp.eq(r1:0,r3:2)
if (p0.new) jumpr:t r31
}
{
r28 = USR
}
{
r28 = extractu(r28,#2,#22)
r1:0 = #0
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
jumpr r31
}
.Linf_add:
{
p0 = !cmp.eq(r1,r3)
p0 = dfclass(r3:2,#8)
if (!p0.new) jumpr:t r31
}
{
r2 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r2)
jumpr r31
}
.size __hexagon_adddf3,.-__hexagon_adddf3
|
CHERIoT-Platform/cheri-rust
| 1,295
|
library/compiler-builtins/compiler-builtins/src/hexagon/memcpy_forward_vp4cp4n2.s
|
.text
.globl hexagon_memcpy_forward_vp4cp4n2
.balign 32
.type hexagon_memcpy_forward_vp4cp4n2,@function
hexagon_memcpy_forward_vp4cp4n2:
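// (inferred) r0 = dst, r1 = src, r2 = length in 4-byte words; copies forward
// with L2 prefetch: a word-by-word prolog runs up to the next 4 KiB source
// page boundary, the main loop streams whole 4 KiB pages (512 double-words
// each) behind an l2fetch, and an epilog copies the remaining words.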
{
r3 = sub(##4096, r1)
r5 = lsr(r2, #3)
}
{
r3 = extractu(r3, #10, #2)
r4 = extractu(r3, #7, #5)
}
{
r3 = minu(r2, r3)
r4 = minu(r5, r4)
}
{
r4 = or(r4, ##2105344)
p0 = cmp.eq(r3, #0)
if (p0.new) jump:nt .Lskipprolog
}
l2fetch(r1, r4)
{
loop0(.Lprolog, r3)
r2 = sub(r2, r3)
}
.falign
.Lprolog:
{
r4 = memw(r1++#4)
memw(r0++#4) = r4.new
} :endloop0
.Lskipprolog:
{
r3 = lsr(r2, #10)
if (cmp.eq(r3.new, #0)) jump:nt .Lskipmain
}
{
loop1(.Lout, r3)
r2 = extractu(r2, #10, #0)
r3 = ##2105472
}
.falign
.Lout:
l2fetch(r1, r3)
loop0(.Lpage, #512)
.falign
.Lpage:
r5:4 = memd(r1++#8)
{
memw(r0++#8) = r4
memw(r0+#4) = r5
} :endloop0:endloop1
.Lskipmain:
{
r3 = ##2105344
r4 = lsr(r2, #3)
p0 = cmp.eq(r2, #0)
if (p0.new) jumpr:nt r31
}
{
r3 = or(r3, r4)
loop0(.Lepilog, r2)
}
l2fetch(r1, r3)
.falign
.Lepilog:
{
r4 = memw(r1++#4)
memw(r0++#4) = r4.new
} :endloop0
jumpr r31
.size hexagon_memcpy_forward_vp4cp4n2, . - hexagon_memcpy_forward_vp4cp4n2
|
CHERIoT-Platform/cheri-rust
| 5,659
|
library/compiler-builtins/compiler-builtins/src/hexagon/dfdiv.s
|
.text
.global __hexagon_divdf3
.type __hexagon_divdf3,@function
.global __qdsp_divdf3 ; .set __qdsp_divdf3, __hexagon_divdf3
.global __hexagon_fast_divdf3 ; .set __hexagon_fast_divdf3, __hexagon_divdf3
.global __hexagon_fast2_divdf3 ; .set __hexagon_fast2_divdf3, __hexagon_divdf3
.p2align 5
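// (inferred) IEEE double divide: a reciprocal is seeded with sfrecipa and
// sharpened with sfmpy correction steps, then quotient bits are produced in
// four multiply-subtract rounds (the packet sequences below) with a final
// remainder test feeding the sticky bit; specials go to .Ldiv_abnormal.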
__hexagon_divdf3:
{
p2 = dfclass(r1:0,#0x02)
p2 = dfclass(r3:2,#0x02)
r13:12 = combine(r3,r1)
r28 = xor(r1,r3)
}
{
if (!p2) jump .Ldiv_abnormal
r7:6 = extractu(r3:2,#23,#52 -23)
r8 = ##0x3f800001
}
{
r9 = or(r8,r6)
r13 = extractu(r13,#11,#52 -32)
r12 = extractu(r12,#11,#52 -32)
p3 = cmp.gt(r28,#-1)
}
.Ldenorm_continue:
{
r11,p0 = sfrecipa(r8,r9)
r10 = and(r8,#-2)
r28 = #1
r12 = sub(r12,r13)
}
{
r10 -= sfmpy(r11,r9):lib
r1 = insert(r28,#11 +1,#52 -32)
r13 = ##0x00800000 << 3
}
{
r11 += sfmpy(r11,r10):lib
r3 = insert(r28,#11 +1,#52 -32)
r10 = and(r8,#-2)
}
{
r10 -= sfmpy(r11,r9):lib
r5 = #-0x3ff +1
r4 = #0x3ff -1
}
{
r11 += sfmpy(r11,r10):lib
p1 = cmp.gt(r12,r5)
p1 = !cmp.gt(r12,r4)
}
{
r13 = insert(r11,#23,#3)
r5:4 = #0
r12 = add(r12,#-61)
}
{
r13 = add(r13,#((-3) << 3))
}
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,#15) }
{ r6 = #0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3) }
{ r5:4 += asl(r7:6,#14); r1:0 -= asl(r15:14,#32) }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,#15) }
{ r6 = #0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3) }
{ r5:4 += asr(r7:6,#1); r1:0 -= asl(r15:14,#32) }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,#15) }
{ r6 = #0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3) }
{ r5:4 += asr(r7:6,#16); r1:0 -= asl(r15:14,#32) }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,#15) }
{ r6 = #0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3) }
{ r5:4 += asr(r7:6,#31); r1:0 -= asl(r15:14,#32); r7:6 = #0 }
{
r15:14 = sub(r1:0,r3:2)
p0 = cmp.gtu(r3:2,r1:0)
if (!p0.new) r6 = #2
}
{
r5:4 = add(r5:4,r7:6)
if (!p0) r1:0 = r15:14
r15:14 = #0
}
{
p0 = cmp.eq(r1:0,r15:14)
if (!p0.new) r4 = or(r4,r28)
}
{
r7:6 = neg(r5:4)
}
{
if (!p3) r5:4 = r7:6
}
{
r1:0 = convert_d2df(r5:4)
if (!p1) jump .Ldiv_ovf_unf
}
{
r1 += asl(r12,#52 -32)
jumpr r31
}
.Ldiv_ovf_unf:
{
r1 += asl(r12,#52 -32)
r13 = extractu(r1,#11,#52 -32)
}
{
r7:6 = abs(r5:4)
r12 = add(r12,r13)
}
{
p0 = cmp.gt(r12,##0x3ff +0x3ff)
if (p0.new) jump:nt .Ldiv_ovf
}
{
p0 = cmp.gt(r12,#0)
if (p0.new) jump:nt .Lpossible_unf2
}
{
r13 = add(clb(r7:6),#-1)
r12 = sub(#7,r12)
r10 = USR
r11 = #63
}
{
r13 = min(r12,r11)
r11 = or(r10,#0x030)
r7:6 = asl(r7:6,r13)
r12 = #0
}
{
r15:14 = extractu(r7:6,r13:12)
r7:6 = lsr(r7:6,r13)
r3:2 = #1
}
{
p0 = cmp.gtu(r3:2,r15:14)
if (!p0.new) r6 = or(r2,r6)
r7 = setbit(r7,#52 -32+4)
}
{
r5:4 = neg(r7:6)
p0 = bitsclr(r6,#(1<<4)-1)
if (!p0.new) r10 = r11
}
{
USR = r10
if (p3) r5:4 = r7:6
r10 = #-0x3ff -(52 +4)
}
{
r1:0 = convert_d2df(r5:4)
}
{
r1 += asl(r10,#52 -32)
jumpr r31
}
.Lpossible_unf2:
{
r3:2 = extractu(r1:0,#63,#0)
r15:14 = combine(##0x00100000,#0)
r10 = #0x7FFF
}
{
p0 = dfcmp.eq(r15:14,r3:2)
p0 = bitsset(r7,r10)
}
{
if (!p0) jumpr r31
r10 = USR
}
{
r10 = or(r10,#0x30)
}
{
USR = r10
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.Ldiv_ovf:
{
r10 = USR
r3:2 = combine(##0x7fefffff,#-1)
r1 = mux(p3,#0,#-1)
}
{
r7:6 = combine(##0x7ff00000,#0)
r5 = extractu(r10,#2,#22)
r10 = or(r10,#0x28)
}
{
USR = r10
r5 ^= lsr(r1,#31)
r4 = r5
}
{
p0 = !cmp.eq(r4,#1)
p0 = !cmp.eq(r5,#2)
if (p0.new) r3:2 = r7:6
p0 = dfcmp.eq(r3:2,r3:2)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_abnormal:
{
p0 = dfclass(r1:0,#0x0F)
p0 = dfclass(r3:2,#0x0F)
p3 = cmp.gt(r28,#-1)
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x08)
}
{
p2 = dfclass(r1:0,#0x01)
p2 = dfclass(r3:2,#0x01)
}
{
if (!p0) jump .Ldiv_nan
if (p1) jump .Ldiv_invalid
}
{
if (p2) jump .Ldiv_invalid
}
{
p2 = dfclass(r1:0,#(0x0F ^ 0x01))
p2 = dfclass(r3:2,#(0x0F ^ 0x08))
}
{
p1 = dfclass(r1:0,#(0x0F ^ 0x08))
p1 = dfclass(r3:2,#(0x0F ^ 0x01))
}
{
if (!p2) jump .Ldiv_zero_result
if (!p1) jump .Ldiv_inf_result
}
{
p0 = dfclass(r1:0,#0x02)
p1 = dfclass(r3:2,#0x02)
r10 = ##0x00100000
}
{
r13:12 = combine(r3,r1)
r1 = insert(r10,#11 +1,#52 -32)
r3 = insert(r10,#11 +1,#52 -32)
}
{
if (p0) r1 = or(r1,r10)
if (p1) r3 = or(r3,r10)
}
{
r5 = add(clb(r1:0),#-11)
r4 = add(clb(r3:2),#-11)
r10 = #1
}
{
r12 = extractu(r12,#11,#52 -32)
r13 = extractu(r13,#11,#52 -32)
}
{
r1:0 = asl(r1:0,r5)
r3:2 = asl(r3:2,r4)
if (!p0) r12 = sub(r10,r5)
if (!p1) r13 = sub(r10,r4)
}
{
r7:6 = extractu(r3:2,#23,#52 -23)
}
{
r9 = or(r8,r6)
jump .Ldenorm_continue
}
.Ldiv_zero_result:
{
r1 = xor(r1,r3)
r3:2 = #0
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_inf_result:
{
p2 = dfclass(r3:2,#0x01)
p2 = dfclass(r1:0,#(0x0F ^ 0x08))
}
{
r10 = USR
if (!p2) jump 1f
r1 = xor(r1,r3)
}
{
r10 = or(r10,#0x04)
}
{
USR = r10
}
1:
{
r3:2 = combine(##0x7ff00000,#0)
p0 = dfcmp.uo(r3:2,r3:2)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_nan:
{
p0 = dfclass(r1:0,#0x10)
p1 = dfclass(r3:2,#0x10)
if (!p0.new) r1:0 = r3:2
if (!p1.new) r3:2 = r1:0
}
{
r5 = convert_df2sf(r1:0)
r4 = convert_df2sf(r3:2)
}
{
r1:0 = #-1
jumpr r31
}
.Ldiv_invalid:
{
r10 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r10)
jumpr r31
}
.size __hexagon_divdf3,.-__hexagon_divdf3
|
CHERIoT-Platform/cheri-rust
| 5,120
|
library/compiler-builtins/compiler-builtins/src/hexagon/fastmath2_dlib_asm.s
|
.text
.global __hexagon_fast2_dadd_asm
.type __hexagon_fast2_dadd_asm, @function
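// (inferred) fast non-IEEE packed-double format: a 16-bit exponent sits in the
// low halfword of R0 with the mantissa in the remaining bits of R1:0;
// VABSDIFFH measures the exponent distance, the shifted mantissas are added,
// and the sum is renormalized with clb.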
__hexagon_fast2_dadd_asm:
.falign
{
R7:6 = VABSDIFFH(R1:0, R3:2)
R9 = #62
R4 = SXTH(R0)
R5 = SXTH(R2)
} {
R6 = SXTH(R6)
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
if ( P0) R4 = #1
if (!P0) R5 = #1
R0.L = #0
R6 = MIN(R6, R9)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
R2.L = #0
R11:10 = #0
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = add(R1:0, R3:2)
R10.L = #0x8001
} {
R4 = clb(R1:0)
R9 = #58
} {
R4 = add(R4, #-1)
p0 = cmp.gt(R4, R9)
} {
R1:0 = ASL(R1:0, R4)
R8 = SUB(R8, R4)
if(p0) jump .Ldenorma
} {
R0 = insert(R8, #16, #0)
jumpr r31
}
.Ldenorma:
{
R1:0 = R11:10
jumpr r31
}
.text
.global __hexagon_fast2_dsub_asm
.type __hexagon_fast2_dsub_asm, @function
__hexagon_fast2_dsub_asm:
.falign
{
R7:6 = VABSDIFFH(R1:0, R3:2)
R9 = #62
R4 = SXTH(R0)
R5 = SXTH(R2)
} {
R6 = SXTH(R6)
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
if ( P0) R4 = #1
if (!P0) R5 = #1
R0.L = #0
R6 = MIN(R6, R9)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
R2.L = #0
R11:10 = #0
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = sub(R1:0, R3:2)
R10.L = #0x8001
} {
R4 = clb(R1:0)
R9 = #58
} {
R4 = add(R4, #-1)
p0 = cmp.gt(R4, R9)
} {
R1:0 = ASL(R1:0, R4)
R8 = SUB(R8, R4)
if(p0) jump .Ldenorm
} {
R0 = insert(R8, #16, #0)
jumpr r31
}
.Ldenorm:
{
R1:0 = R11:10
jumpr r31
}
.text
.global __hexagon_fast2_dmpy_asm
.type __hexagon_fast2_dmpy_asm, @function
__hexagon_fast2_dmpy_asm:
.falign
{
R13= lsr(R2, #16)
R5 = sxth(R2)
R4 = sxth(R0)
R12= lsr(R0, #16)
}
{
R11:10 = mpy(R1, R3)
R7:6 = mpy(R1, R13)
R0.L = #0x0
R15:14 = #0
}
{
R11:10 = add(R11:10, R11:10)
R7:6 += mpy(R3, R12)
R2.L = #0x0
R15.H = #0x8000
}
{
R7:6 = asr(R7:6, #15)
R12.L = #0x8001
p1 = cmp.eq(R1:0, R3:2)
}
{
R7:6 = add(R7:6, R11:10)
R8 = add(R4, R5)
p2 = cmp.eq(R1:0, R15:14)
}
{
R9 = clb(R7:6)
R3:2 = abs(R7:6)
R11 = #58
}
{
p1 = and(p1, p2)
R8 = sub(R8, R9)
R9 = add(R9, #-1)
p0 = cmp.gt(R9, R11)
}
{
R8 = add(R8, #1)
R1:0 = asl(R7:6, R9)
if(p1) jump .Lsat
}
{
R0 = insert(R8,#16, #0)
if(!p0) jumpr r31
}
{
R0 = insert(R12,#16, #0)
jumpr r31
}
.Lsat:
{
R1:0 = #-1
}
{
R1:0 = lsr(R1:0, #1)
}
{
R0 = insert(R8,#16, #0)
jumpr r31
}
.text
.global __hexagon_fast2_qd2f_asm
.type __hexagon_fast2_qd2f_asm, @function
__hexagon_fast2_qd2f_asm:
.falign
{
R3 = abs(R1):sat
R4 = sxth(R0)
R5 = #0x40
R6.L = #0xffc0
}
{
R0 = extractu(R3, #8, #0)
p2 = cmp.gt(R4, #126)
p3 = cmp.ge(R4, #-126)
R6.H = #0x7fff
}
{
p1 = cmp.eq(R0,#0x40)
if(p1.new) R5 = #0
R4 = add(R4, #126)
if(!p3) jump .Lmin
}
{
p0 = bitsset(R3, R6)
R0.L = #0x0000
R2 = add(R3, R5)
R7 = lsr(R6, #8)
}
{
if(p0) R4 = add(R4, #1)
if(p0) R3 = #0
R2 = lsr(R2, #7)
R0.H = #0x8000
}
{
R0 = and(R0, R1)
R6 &= asl(R4, #23)
if(!p0) R3 = and(R2, R7)
if(p2) jump .Lmax
}
{
R0 += add(R6, R3)
jumpr r31
}
.Lmax:
{
R0.L = #0xffff;
}
{
R0.H = #0x7f7f;
jumpr r31
}
.Lmin:
{
R0 = #0x0
jumpr r31
}
.text
.global __hexagon_fast2_f2qd_asm
.type __hexagon_fast2_f2qd_asm, @function
__hexagon_fast2_f2qd_asm:
.falign
{
R1 = asl(R0, #7)
p0 = tstbit(R0, #31)
R5:4 = #0
R3 = add(R0,R0)
}
{
R1 = setbit(R1, #30)
R0= extractu(R0,#8,#23)
R4.L = #0x8001
p1 = cmp.eq(R3, #0)
}
{
R1= extractu(R1, #31, #0)
R0= add(R0, #-126)
R2 = #0
if(p1) jump .Lminqd
}
{
R0 = zxth(R0)
if(p0) R1= sub(R2, R1)
jumpr r31
}
.Lminqd:
{
R1:0 = R5:4
jumpr r31
}
|
CHERIoT-Platform/cheri-rust
| 11,809
|
library/std/src/sys/pal/sgx/abi/entry.S
|
/* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:
.section ".note.x86_64-fortanix-unknown-sgx", "", @note
.align 4
.long 1f - 0f /* name length (not including padding) */
.long 3f - 2f /* desc length (not including padding) */
.long 1 /* type = NT_VERSION */
0: .asciz "toolchain-version" /* name */
1: .align 4
2: .long 1 /* desc - toolchain version number, 32-bit LE */
3: .align 4
.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/* RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24
.Lxsave_mxcsr:
.short 0x1fbf
/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */
/* When using this macro, don't forget to adjust the linker version script! */
.macro globvar name:req size:req
.global \name
.protected \name
.align \size
.size \name , \size
\name :
.org .+\size
.endm
/* The base address (relative to enclave start) of the heap area */
globvar HEAP_BASE 8
/* The heap size in bytes */
globvar HEAP_SIZE 8
/* Value of the RELA entry in the dynamic table */
globvar RELA 8
/* Value of the RELACOUNT entry in the dynamic table */
globvar RELACOUNT 8
/* The enclave size in bytes */
globvar ENCLAVE_SIZE 8
/* The base address (relative to enclave start) of the enclave configuration area */
globvar CFGDATA_BASE 8
/* Non-zero if debugging is enabled, zero otherwise */
globvar DEBUG 1
/* The base address (relative to enclave start) of the enclave text section */
globvar TEXT_BASE 8
/* The size in bytes of enclave text section */
globvar TEXT_SIZE 8
/* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_OFFSET 8
/* The size in bytes of enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_LEN 8
/* The base address (relative to enclave start) of the enclave .eh_frame section */
globvar EH_FRM_OFFSET 8
/* The size in bytes of enclave .eh_frame section */
globvar EH_FRM_LEN 8
.org .Lxsave_clear+512
.Lxsave_header:
.int 0, 0 /* XSTATE_BV */
.int 0, 0 /* XCOMP_BV */
.org .+48 /* reserved bits */
.data
.Laborted:
.byte 0
/* TCS local storage section */
.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags, 0x08 /* initialized by loader */
.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw, 0x0a
.equ tcsls_user_mxcsr, 0x0c
.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp, 0x28
.equ tcsls_user_retip, 0x30
.equ tcsls_user_rbp, 0x38
.equ tcsls_user_r12, 0x40
.equ tcsls_user_r13, 0x48
.equ tcsls_user_r14, 0x50
.equ tcsls_user_r15, 0x58
.equ tcsls_tls_ptr, 0x60
.equ tcsls_tcs_addr, 0x68
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
.ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
.abort
.endif
mov $(1<<tcsls_flag_secondary),%e\reg
and %gs:tcsls_flags,%\reg
.endm
/* We place the ELF entry point in a separate section so it can be removed by
elf2sgxs */
.section .text_no_sgx, "ax"
.Lelf_entry_error_msg:
.ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_entry_error_msg_end:
.global elf_entry
.type elf_entry,function
elf_entry:
/* print error message */
movq $2,%rdi /* write to stderr (fd 2) */
lea .Lelf_entry_error_msg(%rip),%rsi
movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
.Lelf_entry_call:
movq $1,%rax /* write() syscall */
syscall
test %rax,%rax
jle .Lelf_exit /* exit on error */
add %rax,%rsi
sub %rax,%rdx /* all chars written? */
jnz .Lelf_entry_call
.Lelf_exit:
movq $60,%rax /* exit() syscall */
movq $1,%rdi /* exit code 1 */
syscall
ud2 /* should not be reached */
/* end elf_entry */
/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
andq $~0x40400, (%rsp)
popfq
/* check for abort */
bt $0,.Laborted(%rip)
jc .Lreentry_panic
.endm
.text
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
mov %rcx,%gs:tcsls_user_retip
mov %rsp,%gs:tcsls_user_rsp
mov %rbp,%gs:tcsls_user_rbp
mov %r12,%gs:tcsls_user_r12
mov %r13,%gs:tcsls_user_r13
mov %r14,%gs:tcsls_user_r14
mov %r15,%gs:tcsls_user_r15
mov %rbx,%gs:tcsls_tcs_addr
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
mov %rdx, %r10
mov $-1, %rax
mov $-1, %rdx
xrstor .Lxsave_clear(%rip)
lfence
mov %r10, %rdx
/* check if returning from usercall */
mov %gs:tcsls_last_rsp,%r11
test %r11,%r11
jnz .Lusercall_ret
/* setup stack */
mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
/* here. This is fixed below under "adjust stack". */
/* check for thread init */
bts $tcsls_flag_init_once,%gs:tcsls_flags
jc .Lskip_init
/* adjust stack */
lea IMAGE_BASE(%rip),%rax
add %rax,%rsp
mov %rsp,%gs:tcsls_tos
entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
mov %rdi,%rbx
mov %rsi,%r12
mov %rdx,%r13
mov %r8,%r14
mov %r9,%r15
load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
call tcs_init
/* reload caller-saved registers */
mov %rbx,%rdi
mov %r12,%rsi
mov %r13,%rdx
mov %r14,%r8
mov %r15,%r9
jmp .Lafter_init
.Lskip_init:
entry_sanitize_final
.Lafter_init:
/* call into main entry point */
load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
mov %rax,%rsi /* RSI = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
xor %rdi,%rdi /* RDI = normal exit */
.Lexit:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set later */
/* RCX overwritten by ENCLU */
/* RDX contains return value */
/* RSP set later */
/* RBP set later */
/* RDI contains exit mode */
/* RSI contains return value */
xor %r8,%r8
xor %r9,%r9
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
mov %rdx, %rcx /* save RDX */
mov $-1, %rax
mov %rax, %rdx
xrstor .Lxsave_clear(%rip)
mov %rcx, %rdx /* restore RDX */
/* clear flags */
pushq $0
popfq
/* restore user registers */
mov %gs:tcsls_user_r12,%r12
mov %gs:tcsls_user_r13,%r13
mov %gs:tcsls_user_r14,%r14
mov %gs:tcsls_user_r15,%r15
mov %gs:tcsls_user_retip,%rbx
mov %gs:tcsls_user_rsp,%rsp
mov %gs:tcsls_user_rbp,%rbp
fldcw %gs:tcsls_user_fcw
ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
mov $0x4,%eax /* EEXIT */
enclu
/* end sgx_entry */
.Lreentry_panic:
orq $8,%rsp
jmp abort_reentry
/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
.global usercall
usercall:
test %rcx,%rcx /* check `abort` function argument */
jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
testb $0xff,DEBUG(%rip)
jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rbx
sub $8, %rsp
fstcw 4(%rsp)
stmxcsr (%rsp)
movq %rsp,%gs:tcsls_last_rsp
.Lusercall_noreturn:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set by sgx_exit */
/* RCX overwritten by ENCLU */
/* RDX contains parameter */
/* RSP set by sgx_exit */
/* RBP set by sgx_exit */
/* RDI contains parameter */
/* RSI contains parameter */
/* R8 contains parameter */
/* R9 contains parameter */
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
/* extended registers/flags cleared by sgx_exit */
/* exit */
jmp .Lsgx_exit
.Lusercall_ret:
movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
ldmxcsr (%rsp)
fldcw 4(%rsp)
add $8, %rsp
entry_sanitize_final
pop %rbx
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
/* return */
mov %rsi,%rax /* RAX = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
pop %r11
lfence
jmp *%r11
/*
The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;
// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);
// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/
.global get_tcs_addr
get_tcs_addr:
mov %gs:tcsls_tcs_addr,%rax
pop %r11
lfence
jmp *%r11
.global get_tls_ptr
get_tls_ptr:
mov %gs:tcsls_tls_ptr,%rax
pop %r11
lfence
jmp *%r11
.global set_tls_ptr
set_tls_ptr:
mov %rdi,%gs:tcsls_tls_ptr
pop %r11
lfence
jmp *%r11
.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
xor %rax,%rax
xchg %gs:tcsls_debug_panic_buf_ptr,%rax
pop %r11
lfence
jmp *%r11
|
chisatowo/bochs_os-exp
| 3,250
|
boot/mbr.S
|
; Master Boot Record (MBR)
;
%include "boot.inc"
;LOADER_BASE_ADDR equ 0xA000
;LOADER_START_SECTOR equ 0x2
; ---------------------------------------------
SECTION MBR vstart=0x7c00 ; set the MBR's post-link relocation address to 0x7c00 (this only fixes the base address used to assemble the code in this program; it does not load anything)
mov ax,cs
mov ds,ax
mov es,ax
mov ss,ax
mov fs,ax
mov sp,0x7c00
mov ax,0xb800 ; segment address of VGA text-mode video memory
mov gs,ax ; gs is the segment register we dedicate to video-memory writes
; Clear the screen using function 0x06: scroll the whole window up
; INT 0x10, function 0x06: scroll window up
; AH = function number 0x06
; AL = number of lines to scroll (0 means the whole window)
; BH = attribute used for the blanked lines
; (CL,CH) = (X,Y) of the window's top-left corner
; (DL,DH) = (X,Y) of the window's bottom-right corner
; No return value.
mov ax, 0x600
mov bx, 0x700
mov cx, 0x0 ; top-left corner: (0,0)
mov dx, 0x184f ; bottom-right corner: (79,24) on an 80x25 screen
; VGA text mode has at most 80 characters per line and 25 lines in total
; indices start at 0, so row 24 = 0x18 and column 79 = 0x4f
int 0x10 ; issue the INT 0x10 call
;----------- the next three instructions fetch the cursor position ----------
; .get_cursor: fetch the current cursor position so we can print at it
mov ah, 3 ; input: sub-function number 3
mov bh, 0 ; input: bh holds the page number whose cursor we query
int 0x10 ; issue the INT 0x10 call
; output: (CH,CL) hold the cursor's start and end scan lines
; output: (DH,DL) hold the cursor's row and column (Y and X coordinates)
;;---------------- print a string ---------------------
;; INT 0x10, function 0x13: display a string in teletype mode
;mov ax, message
;mov bp, ax ; es:bp = string start address; es already matches cs here
;; the segment registers were initialized at the top of the file
;mov cx, 5 ; input: cx = string length, not counting the terminating 0
;mov ax, 0x1301 ; input: ah = function 0x13, al = write mode; al=0x1: show the string and move the cursor along
;mov bx, 0x2 ; input: bh = page number to display on, bl = attribute, green on black (bl = 02h)
;int 0x10 ; issue the INT 0x10 call
;--------------- print a string by writing video memory directly -----------
; take the cursor position fetched above and turn it into a byte offset in si
mov bx,dx ; retrieve the cursor position obtained earlier
mov ax,2 ; each character cell occupies two bytes
mul bl ; column bytes first: multiply ax by bl, result in ax
mov si,0 ; we need indexed addressing, and in real mode only si/di can index
add si,ax ; accumulate the column byte offset
mov ax,160 ; now row bytes: 80 characters per row, two bytes per character
mul bh
add si,ax ; accumulate the row byte offset
mov byte [gs:si],'1' ; in VGA text mode the low byte of each cell is the character itself
mov byte [gs:si+0x1],0x94 ; the high byte is the attribute: 9 = blinking blue background, 4 = red foreground
mov byte [gs:si+0x2],' '
mov byte [gs:si+0x3],0x94
mov byte [gs:si+0x4],'M'
mov byte [gs:si+0x5],0x94
mov byte [gs:si+0x6],'B'
mov byte [gs:si+0x7],0x94
mov byte [gs:si+0x8],'R'
mov byte [gs:si+0x9],0x94
;-------------- prepare to read disk sectors ----------------
mov eax,LOADER_START_SECTOR ; LBA address of the starting sector
mov bx,LOADER_BASE_ADDR ; address the LOADER will be read into
mov cx,1 ; number of sectors to read
call rd_disk_m_16 ; read the LOADER from disk
jmp LOADER_BASE_ADDR ; once read, jump to the LOADER, which loads and initializes the kernel
;-------------- read n sectors from disk ------------------
rd_disk_m_16:
; eax=LBA sector number
; bx=address the LOADER will be read into
; cx=number of sectors to read
mov esi,eax ;back up eax
mov di,cx ;back up cx
; Reading the disk:
; 1. set the number of sectors to read:
mov dx,0x1f2
mov al,cl
out dx,al ;number of sectors to read
mov eax,esi ;restore eax
; 2. write the LBA address into ports 0x1f3~0x1f6
; LBA bits 7~0 go to port 0x1f3
mov dx,0x1f3
out dx,al
; LBA bits 15~8 go to port 0x1f4
mov cl,8
shr eax,cl ; shift eax right by 8 bits
mov dx,0x1f4
out dx,al
; LBA bits 23~16 go to port 0x1f5
shr eax,cl
mov dx,0x1f5
out dx,al
shr eax,cl
and al,0x0f ; keep LBA bits 24~27
or al,0xe0 ; set bits 7~4 to 1110, selecting LBA mode
mov dx,0x1f6
out dx,al
; 3. write the read command, 0x20, to port 0x1f7
mov dx,0x1f7
mov al,0x20
out dx,al
; 4. poll the disk status
.not_ready:
; same port: out writes a command, in reads the disk status
nop
in al,dx
and al,0x88 ; bit 3 = 1 means the controller is ready, bit 7 = 1 means the disk is busy
cmp al,0x08
jnz .not_ready ; if bit 3 is not set the disk is not ready yet, so keep polling
; 5. read the data from port 0x1f0
mov ax,di ; di holds the sector count; each read brings in one word (two bytes), so di*256 reads are needed
mov dx,256
mul dx
mov cx,ax
mov dx,0x1f0
.go_on_read:
in ax,dx
mov [bx],ax
add bx,2
loop .go_on_read ; loop counts down cx by 1 each iteration and branches while it is nonzero
ret
;message db "1 MBR" ; string to print, kept here as an in-memory variable
times 510-($-$$) db 0 ; pad the remaining bytes so the MBR fills the disk's first sector (512 bytes)
db 0x55,0xaa ; MBR magic number
|
chitwang/sssnakelyzer
| 1,859
|
src/helper_func.s
|
allocmem:
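# wrapper around malloc (convention inferred from the code): the size argument
# is read from the stack at 16(%rbp) rather than from %rdi; callee-saved
# registers are preserved and %rsp is re-aligned to 16 bytes before the call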
pushq %rbp
mov %rsp, %rbp
pushq %rbx
pushq %rdi
pushq %rsi
pushq %r12
pushq %r13
pushq %r14
pushq %r15
testq $15, %rsp
jz is_mem_aligned
pushq $0 # align to 16 bytes
movq 16(%rbp), %rdi
call malloc
add $8, %rsp # remove padding
jmp mem_done
is_mem_aligned:
movq 16(%rbp), %rdi
call malloc
mem_done:
popq %r15
popq %r14
popq %r13
popq %r12
popq %rsi
popq %rdi
popq %rbx
popq %rbp
ret
print:
pushq %rbp
mov %rsp, %rbp
pushq %rbx
pushq %rdi
pushq %rsi
pushq %r12
pushq %r13
pushq %r14
pushq %r15
testq $15, %rsp
jz is_print_aligned
pushq $0 # align to 16 bytes
lea integer_format(%rip), %rdi
movq 16(%rbp), %rsi
xor %rax, %rax
call printf
add $8, %rsp
jmp print_done
is_print_aligned:
lea integer_format(%rip), %rdi
movq 16(%rbp), %rsi
xor %rax, %rax
call printf
print_done:
popq %r15
popq %r14
popq %r13
popq %r12
popq %rsi
popq %rdi
popq %rbx
popq %rbp
ret
strcmp1:
pushq %rbp
mov %rsp, %rbp
pushq %rbx
pushq %rdi
pushq %rsi
pushq %r12
pushq %r13
pushq %r14
pushq %r15
testq $15, %rsp
jz is_aligned
pushq $0 # align to 16 bytes
movq 24(%rbp), %rdi
movq 16(%rbp), %rsi
xor %rax, %rax
call strcmp
cdqe
add $8, %rsp
jmp cmp_done
is_aligned:
movq 24(%rbp), %rdi
movq 16(%rbp), %rsi
xor %rax, %rax
call strcmp
cdqe
cmp_done:
popq %r15
popq %r14
popq %r13
popq %r12
popq %rsi
popq %rdi
popq %rbx
popq %rbp
ret
|
chitwang/sssnakelyzer
| 1,375
|
test/tests/strcmp.s
|
.file "strcmp.c"
.text
.section .rodata
.LC0:
.string "First string is greater"
.LC1:
.string "Second string is greater"
.LC2:
.string "strings are same"
.text
.globl main
.type main, @function
main:
endbr64
pushq %rbp
movq %rsp, %rbp
subq $64, %rsp
movq %fs:40, %rax
movq %rax, -8(%rbp)
xorl %eax, %eax
movabsq $8022916924116329832, %rax
movl $560229490, %edx
movq %rax, -64(%rbp)
movq %rdx, -56(%rbp)
movl $0, -48(%rbp)
movabsq $8022916924116329800, %rax
movl $560229490, %edx
movq %rax, -32(%rbp)
movq %rdx, -24(%rbp)
movl $0, -16(%rbp)
leaq -32(%rbp), %rdx
leaq -64(%rbp), %rax
movq %rdx, %rsi
movq %rax, %rdi
call strcmp@PLT
testl %eax, %eax
jle .L2
leaq .LC0(%rip), %rdi
call puts@PLT
jmp .L3
.L2:
leaq -32(%rbp), %rdx
leaq -64(%rbp), %rax
movq %rdx, %rsi
movq %rax, %rdi
call strcmp@PLT
testl %eax, %eax
je .L4
leaq .LC1(%rip), %rdi
call puts@PLT
jmp .L3
.L4:
leaq .LC2(%rip), %rdi
movl $0, %eax
call printf@PLT
.L3:
movl $0, %eax
movq -8(%rbp), %rcx
xorq %fs:40, %rcx
je .L6
call __stack_chk_fail@PLT
.L6:
leave
ret
.size main, .-main
.ident "GCC: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
chungmcl/jerryOS
| 2,482
|
src/entry.S
|
.text
.global _start
_start:
// Grab some system registers for funsies 🤪
mrs x16, CurrentEL
lsr x16, x16, #2 // Extract EL number according to formula (right shift 2)
mrs x17, ID_AA64PFR0_EL1
mrs x18, ID_AA64MMFR1_EL1
mrs x19, ID_AA64PFR1_EL1
mrs x20, ID_AA64MMFR3_EL1
mrs x21, ID_AA64MMFR4_EL1
// Disable trapping of "instructions that access Advanced SIMD and floating-point registers."
// i.e., enable Advanced SIMD and floating-point registers.
// CPACR_EL1's FPEN[21:20] bits should be set to
// "0b11: This control does not cause execution of any instructions to be trapped"
mrs x1, CPACR_EL1
orr x1, x1, #(0b11 << 20)
msr CPACR_EL1, x1
isb
// TODO(chungmcl): Set the register pointer (VBAR_EL1)
// to an Exception Vector Table
// mov x8, {Exception Vector Table Addy}
// msr VBAR_EL1, x8
// For some reason, changing the address of .text and .bss
// using link.lds makes QEMU move the DTB to 0x0, despite the docs at
// https://qemu-project.gitlab.io/qemu/system/arm/virt.html stating
// "For guests booting as “bare-metal” (any other kind of boot),
// the DTB is at the start of RAM (0x4000_0000)."
// Therefore, we pass the DTB address to main() as 0x0 instead of 0x4000_0000
ldr x1, =0x0
// Set the stack pointer to a specific memory address in RAM.
// Leave the value in x2 so we can pass the initial value of $sp to main()
// Why this particular address?
// • QEMU maps RAM to physical address 0x4000_0000, and places the DTB there.
// • jerryOS uses the Cortex-A710's 16KB memory page option.
// • jerryOS will use the second 16KB page after the DTB for the stack and the kernel's
// global variables (the .bss section of the kernel ELF).
// • The second 16KB page in memory, starting at 0x40008000, is structured such that:
// .bss = [0x4000bfff:0x4000a000] (i.e., the second half of the page).
// Therefore, sp starts before 0x4000a000 and grows backwards
// towards the ROM address range, which is just the jerryOS .text and .rodata,
// to avoid overwriting .bss or any other important data.
// • On Cortex-A710, sp must be 16-byte aligned, so start 16 bytes before
// the first byte of .bss: 0x4000a000 - 0x10 = 0x40009ff0.
ldr x2, =0x40009ff0
mov sp, x2
ldr x3, =_kernel_bin
ldr x4, =_rodata_start
ldr x5, =_rodata_end
ldr x6, =_text_start
ldr x7, =_text_end
ldr x8, =_bss_start
ldr x9, =_bss_end
// Jump to main.rs:main()
bl main
.end
|
ciron-nine/CS61C-2020-FALL
| 2,459
|
lab07/cache.s
|
# Rewriten by Stephan Kaminsky in RISC-V on 7/30/2018
# This program accesses an array in ways that provide data about the cache parameters.
# Coupled with the Data Cache Simulator and Memory Reference Visualization to help students
# understand how the cache parameters affect cache performance.
#
# PSEUDOCODE:
# int array[]; //Assume sizeof(int) == 4
# for (k = 0; k < repcount; k++) { // repeat repcount times
# /* Step through the selected array segment with the given step size. */
# for (index = 0; index < arraysize; index += stepsize) {
# if(option==0)
# array[index] = 0; // Option 0: One cache access - write
# else
# array[index] = array[index] + 1; // Option 1: Two cache accesses - read AND write
# }
# }
.data
array: .word 2048 # max array size specified in BYTES (DO NOT CHANGE)
.text
##################################################################################################
# You MAY change the code below this section
main: li a0, 128 # array size in BYTES (power of 2 < array size)
li a1, 1 # step size (power of 2 > 0)
li a2, 1 # rep count (int > 0)
li a3, 0 # 0 - option 0, 1 - option 1
# You MAY change the code above this section
##################################################################################################
jal accessWords # lw/sw
#jal accessBytes # lb/sb
li a0,10 # exit
ecall
# SUMMARY OF REGISTER USE:
# a0 = array size in bytes
# a1 = step size
# a2 = number of times to repeat
# a3 = 0 (W) / 1 (RW)
# s0 = moving array ptr
# s1 = array limit (ptr)
accessWords:
la s0, array # ptr to array
add s1, s0, a0 # hardcode array limit (ptr)
slli t1, a1, 2 # multiply stepsize by 4 because WORDS
wordLoop:
beq a3, zero, wordZero
lw t0, 0(s0) # array[index/4]++
addi t0, t0, 1
sw t0, 0(s0)
j wordCheck
wordZero:
sw zero, 0(s0) # array[index/4] = 0
wordCheck:
add s0, s0, t1 # increment ptr
blt s0, s1, wordLoop # inner loop done?
addi a2, a2, -1
bgtz a2, accessWords # outer loop done?
jr ra
accessBytes:
la s0, array # ptr to array
add s1, s0, a0 # hardcode array limit (ptr)
byteLoop:
beq a3, zero, byteZero
lbu t0, 0(s0) # array[index]++
addi t0, t0, 1
sb t0, 0(s0)
j byteCheck
byteZero:
sb zero, 0(s0) # array[index] = 0
byteCheck:
add s0, s0, a1 # increment ptr
blt s0, s1, byteLoop # inner loop done?
addi a2, a2, -1
bgtz a2, accessBytes # outer loop done?
jr ra
|
ciron-nine/CS61C-2020-FALL
| 4,624
|
lab04/megalistmanips.s
|
.globl map
.data
arrays: .word 5, 6, 7, 8, 9
.word 1, 2, 3, 4, 7
.word 5, 2, 7, 4, 3
.word 1, 6, 3, 8, 4
.word 5, 2, 7, 8, 1
start_msg: .asciiz "Lists before: \n"
end_msg: .asciiz "Lists after: \n"
.text
main:
jal create_default_list
mv s0, a0 # s0 = a0 is head of node list
#print "lists before: "
la a1, start_msg
li a0, 4
ecall
#print the list
add a0, s0, x0
jal print_list
# print a newline
jal print_newline
# issue the map call
add a0, s0, x0 # load the address of the first node into a0
la a1, mystery # load the address of the function into a1
jal map
# print "lists after: "
la a1, end_msg
li a0, 4
ecall
# print the list
add a0, s0, x0
jal print_list
li a0, 10
ecall
map:
addi sp, sp, -24
sw ra, 0(sp)
sw s1, 4(sp)
sw s0, 8(sp)
beq a0, x0, done # if we were given a null pointer, we're done.
add s0, a0, x0 # save address of this node in s0
add s1, a1, x0 # save address of function in s1
add t0, x0, x0 # t0 is a counter
# remember that each node is 12 bytes long:
# - 4 for the array pointer
# - 4 for the size of the array
# - 4 more for the pointer to the next node
# Also keep in mind that we should not make ANY assumptions about which registers
# are modified by the callees, even when we know the contents of the functions
# we call. This enforces the abstraction barrier of the calling convention.
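# In C terms, a sketch of the node layout assumed here:
# struct node { int *arr; int size; struct node *next; };
# so 0(s0) = arr, 4(s0) = size, 8(s0) = next.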
mapLoop:
lw t1, 0(s0) # load the address of the array of current node into t1
lw t2, 4(s0) # load the size of the node's array into t2
li t3, 4
mul t3, t3, t0
add t1, t1, t3 # offset the array address by the count
lw a0, 0(t1) # load the value at that address into a0
sw t0, 12(sp)
sw t1, 16(sp)
sw t2, 20(sp)
jalr s1 # call the function on that value.
lw t2, 20(sp)
lw t1, 16(sp)
lw t0, 12(sp)
sw a0, 0(t1) # store the returned value back into the array
addi t0, t0, 1 # increment the count
bne t0, t2, mapLoop # repeat if we haven't reached the array size yet
lw a0, 8(s0) # load the address of the next node into a0
mv a1, s1 # put the address of the function back into a1 to prepare for the recursion
jal ra, map # recurse
done:
lw s0, 8(sp)
lw s1, 4(sp)
lw ra, 0(sp)
addi sp, sp, 24
jr ra
mystery:
mul t1, a0, a0
add a0, t1, a0 # mystery(x) = x * x + x
jr ra
create_default_list:
addi sp, sp, -4
sw ra, 0(sp)
li s0, 0 # pointer to the last node we handled
li s1, 0 # number of nodes handled
li s2, 5 # size
la s3, arrays
loop: #do...
li a0, 12
jal malloc # get memory for the next node
mv s4, a0
li a0, 20
jal malloc # get memory for this array
sw a0, 0(s4) # node->arr = malloc
lw a0, 0(s4)
mv a1, s3
jal fillArray # copy ints over to node->arr
sw s2, 4(s4) # node->size = size (5)
sw s0, 8(s4) # node->next = previously created node
add s0, x0, s4 # last = node
addi s1, s1, 1 # i++
addi s3, s3, 20 # s3 points at next set of ints
li t6, 5
bne s1, t6, loop # ... while i!= 5
mv a0, s4
lw ra, 0(sp)
addi sp, sp, 4
jr ra
fillArray: lw t0, 0(a1) #t0 gets array element
sw t0, 0(a0) #node->arr gets array element
lw t0, 4(a1)
sw t0, 4(a0)
lw t0, 8(a1)
sw t0, 8(a0)
lw t0, 12(a1)
sw t0, 12(a0)
lw t0, 16(a1)
sw t0, 16(a0)
jr ra
print_list:
bne a0, x0, printMeAndRecurse
jr ra # nothing to print
printMeAndRecurse:
mv t0, a0 # t0 gets address of current node
lw t3, 0(a0) # t3 gets array of current node
li t1, 0 # t1 is index into array
printLoop:
slli t2, t1, 2
add t4, t3, t2
lw a1, 0(t4) # a0 gets value in current node's array at index t1
li a0, 1 # prepare for print integer ecall
ecall
li a1, ' ' # a1 = space character to print
li a0, 11 # prepare for print string ecall
ecall
addi t1, t1, 1
li t6, 5
bne t1, t6, printLoop # ... while i!= 5
li a1, '\n'
li a0, 11
ecall
lw a0, 8(t0) # a0 gets address of next node
j print_list # recurse. We don't have to use jal because we already have where we want to return to in ra
print_newline:
li a1, '\n'
li a0, 11
ecall
jr ra
malloc:
mv a1, a0 # Move a0 into a1 so that we can do the syscall correctly
li a0, 9
ecall
jr ra
|
ciron-nine/CS61C-2020-FALL
| 2,118
|
lab04/discrete_fn.s
|
.globl f
.data
neg3: .asciiz "f(-3) should be 6, and it is: "
neg2: .asciiz "f(-2) should be 61, and it is: "
neg1: .asciiz "f(-1) should be 17, and it is: "
zero: .asciiz "f(0) should be -38, and it is: "
pos1: .asciiz "f(1) should be 19, and it is: "
pos2: .asciiz "f(2) should be 42, and it is: "
pos3: .asciiz "f(3) should be 5, and it is: "
output: .word 6, 61, 17, -38, 19, 42, 5
.text
main:
la a0, neg3
jal print_str
li a0, -3
la a1, output
jal f # evaluate f(-3); should be 6
jal print_int
jal print_newline
la a0, neg2
jal print_str
li a0, -2
la a1, output
jal f # evaluate f(-2); should be 61
jal print_int
jal print_newline
la a0, neg1
jal print_str
li a0, -1
la a1, output
jal f # evaluate f(-1); should be 17
jal print_int
jal print_newline
la a0, zero
jal print_str
li a0, 0
la a1, output
jal f # evaluate f(0); should be -38
jal print_int
jal print_newline
la a0, pos1
jal print_str
li a0, 1
la a1, output
jal f # evaluate f(1); should be 19
jal print_int
jal print_newline
la a0, pos2
jal print_str
li a0, 2
la a1, output
jal f # evaluate f(2); should be 42
jal print_int
jal print_newline
la a0, pos3
jal print_str
li a0, 3
la a1, output
jal f # evaluate f(3); should be 5
jal print_int
jal print_newline
li a0, 10
ecall
# f takes in two arguments:
# a0 is the value we want to evaluate f at
# a1 is the address of the "output" array (defined above).
# Think: why might having a1 be useful?
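# A C sketch of the table-lookup approach used below ("output" is the array above):
# int f(int x, const int *output) { return output[x + 3]; }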
f:
# YOUR CODE GOES HERE!
addi a0, a0, 3
li t0, 4
mul a0, a0, t0
add a1, a0, a1
lw a0, 0(a1)
jr ra # Always remember to jr ra after your function!
print_int:
mv a1, a0
li a0, 1
ecall
jr ra
print_str:
mv a1, a0
li a0, 4
ecall
jr ra
print_newline:
li a1, '\n'
li a0, 11
ecall
jr ra
|
ciron-nine/CS61C-2020-FALL
| 3,704
|
lab03/list_map.s
|
.globl map
.text
main:
jal ra, create_default_list
add s0, a0, x0 # a0 = s0 is head of node list
#print the list
add a0, s0, x0
jal ra, print_list
# print a newline
jal ra, print_newline
# load your args
add a0, s0, x0 # load the address of the first node into a0
# load the address of the function in question into a1 (check out la on the green sheet)
### YOUR CODE HERE ###
la a1, square
# issue the call to map
jal ra, map
# print the list
add a0, s0, x0
jal ra, print_list
# print another newline
jal ra, print_newline
addi a0, x0, 10
ecall #Terminate the program
map:
# Prologue: Make space on the stack and back-up registers
### YOUR CODE HERE ###
addi sp, sp, -12
sw ra, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
beq a0, x0, done # If we were given a null pointer (address 0), we're done.
add s0, a0, x0 # Save address of this node in s0
add s1, a1, x0 # Save address of function in s1
# Remember that each node is 8 bytes long: 4 for the value followed by 4 for the pointer to next.
# What does this tell you about how you access the value and how you access the pointer to next?
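# A C sketch of the assumed layout: struct node { int value; struct node *next; };
# so 0(s0) = value and 4(s0) = next.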
# load the value of the current node into a0
# THINK: why a0?
### YOUR CODE HERE ###
lw a0, 0(s0)
# Call the function in question on that value. DO NOT use a label (be prepared to answer why).
# What function? Recall the parameters of "map"
### YOUR CODE HERE ###
jalr s1
# store the returned value back into the node
# Where can you assume the returned value is?
### YOUR CODE HERE ###
sw a0, 0(s0)
# Load the address of the next node into a0
# The Address of the next node is an attribute of the current node.
# Think about how structs are organized in memory.
### YOUR CODE HERE ###
lw a0, 4(s0)
# Put the address of the function back into a1 to prepare for the recursion
# THINK: why a1? What about a0?
### YOUR CODE HERE ###
mv a1, s1
# recurse
### YOUR CODE HERE ###
jal ra, map
done:
# Epilogue: Restore register values and free space from the stack
### YOUR CODE HERE ###
lw ra, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
addi sp, sp, 12
jr ra # Return to caller
square:
mul a0 ,a0, a0
jr ra
create_default_list:
addi sp, sp, -12
sw ra, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
li s0, 0 # pointer to the last node we handled
li s1, 0 # number of nodes handled
loop: #do...
li a0, 8
jal ra, malloc # get memory for the next node
sw s1, 0(a0) # node->value = i
sw s0, 4(a0) # node->next = last
add s0, a0, x0 # last = node
addi s1, s1, 1 # i++
addi t0, x0, 10
bne s1, t0, loop # ... while i!= 10
lw ra, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
addi sp, sp, 12
jr ra
print_list:
bne a0, x0, printMeAndRecurse
jr ra # nothing to print
printMeAndRecurse:
add t0, a0, x0 # t0 gets current node address
lw a1, 0(t0) # a1 gets value in current node
addi a0, x0, 1 # prepare for print integer ecall
ecall
addi a1, x0, ' ' # a1 = space character to print
addi a0, x0, 11 # prepare for print string syscall
ecall
lw a0, 4(t0) # a0 gets address of next node
jal x0, print_list # recurse. We don't have to use jal because we already have where we want to return to in ra
print_newline:
addi a1, x0, '\n' # Load in ascii code for newline
addi a0, x0, 11
ecall
jr ra
malloc:
addi a1, a0, 0
addi a0, x0, 9
ecall
jr ra
|
ciron-nine/CS61C-2020-FALL
| 1,089
|
lab03/ex2.s
|
.globl main
.data
source:
.word 3
.word 1
.word 4
.word 1
.word 5
.word 9
.word 0
dest:
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.text
fun:
addi t0, a0, 1 # t0 = x + 1
sub t1, x0, a0 # t1 = -x
mul a0, t0, t1 # fun(x) = -x * (x + 1)
jr ra
main:
# BEGIN PROLOGUE
addi sp, sp, -20
sw s0, 0(sp)
sw s1, 4(sp)
sw s2, 8(sp)
sw s3, 12(sp)
sw ra, 16(sp)
# END PROLOGUE
addi t0, x0, 0
addi s0, x0, 0
la s1, source
la s2, dest
loop:
slli s3, t0, 2
add t1, s1, s3
lw t2, 0(t1)
beq t2, x0, exit
add a0, x0, t2
addi sp, sp, -8
sw t0, 0(sp)
sw t2, 4(sp)
jal fun
lw t0, 0(sp)
lw t2, 4(sp)
addi sp, sp, 8
add t2, x0, a0
add t3, s2, s3
sw t2, 0(t3)
add s0, s0, t2
addi t0, t0, 1
jal x0, loop
exit:
add a0, x0, s0
# BEGIN EPILOGUE
lw s0, 0(sp)
lw s1, 4(sp)
lw s2, 8(sp)
lw s3, 12(sp)
lw ra, 16(sp)
addi sp, sp, 20
# END EPILOGUE
jr ra
|
ciron-nine/CS61C-2020-FALL
| 5,080
|
lab03/cc_test.s
|
.globl simple_fn naive_pow inc_arr
.data
failure_message: .asciiz "Test failed for some reason.\n"
success_message: .asciiz "Sanity checks passed! Make sure there are no CC violations.\n"
array:
.word 1 2 3 4 5
exp_inc_array_result:
.word 2 3 4 5 6
.text
main:
# We test our program by loading a bunch of random values
# into a few saved registers - if any of these are modified
# after these functions return, then we know calling
# convention was broken by one of these functions
li s0, 2623
li s1, 2910
# ... skipping middle registers so the file isn't too long
# If we wanted to be rigorous, we would add checks for
# s2-s10 as well
li s11, 134
# Now, we call some functions
# simple_fn: should return 1
jal simple_fn # Shorthand for "jal ra, simple_fn"
li t0, 1
bne a0, t0, failure
# naive_pow: should return 2 ** 7 = 128
li a0, 2
li a1, 7
jal naive_pow
li t0, 128
bne a0, t0, failure
# inc_arr: increments "array" in place
la a0, array
li a1, 5
jal inc_arr
jal check_arr # Verifies inc_arr and jumps to "failure" on failure
# Check the values in the saved registers for sanity
li t0, 2623
li t1, 2910
li t2, 134
bne s0, t0, failure
bne s1, t1, failure
bne s11, t2, failure
# If none of those branches were hit, print a message and exit normally
li a0, 4
la a1, success_message
ecall
li a0, 10
ecall
# Just a simple function. Returns 1.
#
# FIXME Fix the reported error in this function (you can delete lines
# if necessary, as long as the function still returns 1 in a0).
simple_fn:
li a0, 1
ret
# Computes a0 to the power of a1.
# This is analogous to the following C pseudocode:
#
# uint32_t naive_pow(uint32_t a0, uint32_t a1) {
# uint32_t s0 = 1;
# while (a1 != 0) {
# s0 *= a0;
# a1 -= 1;
# }
# return s0;
# }
#
# FIXME There's a CC error with this function!
# The big all-caps comments should give you a hint about what's
# missing. Another hint: what does the "s" in "s0" stand for?
naive_pow:
# BEGIN PROLOGUE
addi sp, sp, -4
sw s0, 0(sp)
# END PROLOGUE
li s0, 1
naive_pow_loop:
beq a1, zero, naive_pow_end
mul s0, s0, a0
addi a1, a1, -1
j naive_pow_loop
naive_pow_end:
mv a0, s0
# BEGIN EPILOGUE
lw s0, 0(sp)
addi sp, sp, 4
# END EPILOGUE
ret
# Increments the elements of an array in-place.
# a0 holds the address of the start of the array, and a1 holds
# the number of elements it contains.
#
# This function calls the "helper_fn" function, which takes in an
# address as argument and increments the 32-bit value stored there.
inc_arr:
# BEGIN PROLOGUE
#
# FIXME What other registers need to be saved?
#
addi sp, sp, -16
sw ra, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
# END PROLOGUE
mv s0, a0 # Copy start of array to saved register
mv s1, a1 # Copy length of array to saved register
li t0, 0 # Initialize counter to 0
inc_arr_loop:
beq t0, s1, inc_arr_end
slli t1, t0, 2 # Convert array index to byte offset
add a0, s0, t1 # Add offset to start of array
# Prepare to call helper_fn
#
# FIXME Add code to preserve the value in t0 before we call helper_fn
# Hint: What does the "t" in "t0" stand for?
# Also ask yourself this: why don't we need to preserve t1?
#
mv s2, t0
jal helper_fn
# Finished call for helper_fn
mv t0, s2
addi t0, t0, 1 # Increment counter
j inc_arr_loop
inc_arr_end:
# BEGIN EPILOGUE
lw ra, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
addi sp, sp, 16
# END EPILOGUE
ret
# This helper function adds 1 to the value at the memory address in a0.
# It doesn't return anything.
# C pseudocode for what it does: "*a0 = *a0 + 1"
#
# FIXME This function also violates calling convention, but it might not
# be reported by the Venus CC checker (try and figure out why).
# You should fix the bug anyway by filling in the prologue and epilogue
# as appropriate.
helper_fn:
# BEGIN PROLOGUE
addi sp, sp, -4
sw s0, 0(sp)
# END PROLOGUE
lw t1, 0(a0)
addi s0, t1, 1
sw s0, 0(a0)
# BEGIN EPILOGUE
lw s0, 0(sp)
addi sp, sp, 4
# END EPILOGUE
ret
# YOU CAN IGNORE EVERYTHING BELOW THIS COMMENT
# Checks the result of inc_arr, which should contain 2 3 4 5 6 after
# one call.
# You can safely ignore this function; it has no errors.
check_arr:
la t0, exp_inc_array_result
la t1, array
addi t2, t1, 20 # Last element is 5*4 bytes off
check_arr_loop:
beq t1, t2, check_arr_end
lw t3, 0(t0)
lw t4, 0(t1)
bne t3, t4, failure
addi t0, t0, 4
addi t1, t1, 4
j check_arr_loop
check_arr_end:
ret
# This isn't really a function - it just prints a message, then
# terminates the program on failure. Think of it like an exception.
failure:
li a0, 4 # String print ecall
la a1, failure_message
ecall
li a0, 10 # Exit ecall
ecall
|
ciron-nine/CS61C-2020-FALL
| 2,265
|
proj2/src/write_matrix.s
|
.globl write_matrix
.text
# ==============================================================================
# FUNCTION: Writes a matrix of integers into a binary file
# FILE FORMAT:
# The first 8 bytes of the file will be two 4 byte ints representing the
# numbers of rows and columns respectively. Every 4 bytes thereafter is an
# element of the matrix in row-major order.
# Arguments:
# a0 (char*) is the pointer to string representing the filename
# a1 (int*) is the pointer to the start of the matrix in memory
# a2 (int) is the number of rows in the matrix
# a3 (int) is the number of columns in the matrix
# Returns:
# None
# Exceptions:
# - If you receive an fopen error or eof,
# this function terminates the program with error code 93.
# - If you receive an fwrite error or eof,
# this function terminates the program with error code 94.
# - If you receive an fclose error or eof,
# this function terminates the program with error code 95.
# ==============================================================================
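# A C stdio sketch of this format (illustrative only, not the ecall API used below):
# fwrite(&rows, 4, 1, f); fwrite(&cols, 4, 1, f);
# fwrite(matrix, 4, rows * cols, f); /* elements in row-major order */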
write_matrix:
addi sp, sp, -40
sw s3, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
sw ra, 16(sp)
sw s4, 20(sp)
sw s5, 24(sp)
sw s6, 28(sp)
sw s7, 32(sp)
sw s8, 36(sp)
# Prologue
mv s0, a2 # rows
mv s1, a3 # cols
mv s2, a1 # pointer to matrix
mv a1, a0
li a2, 1
jal ra, fopen
li t0, -1
beq a0, t0, openerror
mv s3, a0 # file descriptor
li a0, 8
jal ra, malloc
mv s4, a0 # pointer to 8-byte header buffer
sw s0, 0(s4)
sw s1, 4(s4)
mv a1, s3
mv a2, s4
li a3, 2
li a4, 4
jal ra, fwrite
li a3, 2
bne a0, a3, writeerror
mv a0, s4
jal ra, free
mul a3, s0, s1 # a3 = number of elements to write
mul s0, s1, s0 # s0 = expected fwrite return value (rows * cols)
li a4, 4
mv a2, s2
mv a1, s3
jal ra, fwrite
bne a0, s0, writeerror
# Do not free the matrix buffer here: the caller owns it and may still use it
# (classify.s reuses this buffer after write_matrix returns).
mv a1, s3
jal ra, fclose
li t0, -1
beq a0, t0, closeerror
# Epilogue
lw s3, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
lw ra, 16(sp)
lw s4, 20(sp)
lw s5, 24(sp)
lw s6, 28(sp)
lw s7, 32(sp)
lw s8, 36(sp)
addi sp, sp, 40
ret
openerror:
li a1, 93
j exit2
writeerror:
li a1, 94
j exit2
closeerror:
li a1, 95
j exit2
|
ciron-nine/CS61C-2020-FALL
| 10,441
|
proj2/src/utils.s
|
##############################################################
# Do not modify! (But feel free to use the functions provided)
##############################################################
#define c_print_int 1
#define c_print_str 4
#define c_atoi 5
#define c_sbrk 9
#define c_exit 10
#define c_print_char 11
#define c_openFile 13
#define c_readFile 14
#define c_writeFile 15
#define c_closeFile 16
#define c_exit2 17
#define c_fflush 18
#define c_feof 19
#define c_ferror 20
#define c_printHex 34
# ecall wrappers
.globl print_int, print_str, atoi, sbrk, exit, print_char, fopen, fread, fwrite, fclose, exit2, fflush, ferror, print_hex
# helper functions
.globl file_error, print_int_array, malloc, free, print_num_alloc_blocks, num_alloc_blocks
# unittest helper functions
.globl compare_int_array
.data
error_string: .string "This library file should not be directly called!"
.text
# Exits if you run this file
main:
la a1 error_string
jal print_str
li a1 1
jal exit2
# End main
#================================================================
# void print_int(int a1)
# Prints the integer in a1.
# args:
# a1 = integer to print
# return:
# void
#================================================================
print_int:
li a0 c_print_int
ecall
ret
#================================================================
# void print_str(char *a1)
# Prints the null-terminated string at address a1.
# args:
# a1 = address of the string you want printed.
# return:
# void
#================================================================
print_str:
li a0 c_print_str
ecall
ret
#================================================================
# int atoi(char* a1)
# Returns the integer version of the string at address a1.
# args:
# a1 = address of the string you want to turn into an integer.
# return:
# a0 = Integer representation of string
#================================================================
atoi:
li a0 c_atoi
ecall
ret
#================================================================
# void *sbrk(int a1)
# Allocates a1 bytes onto the heap.
# args:
# a1 = Number of bytes you want to allocate.
# return:
# a0 = Pointer to the start of the allocated memory
#================================================================
sbrk:
li a0 c_sbrk
ecall
ret
#================================================================
# void noreturn exit()
# Exits the program with a zero exit code.
# args:
# None
# return:
# No Return
#================================================================
exit:
li a0 c_exit
ecall
#================================================================
# void print_char(char a1)
# Prints the ASCII character in a1 to the console.
# args:
# a1 = character to print
# return:
# void
#================================================================
print_char:
li a0 c_print_char
ecall
ret
#================================================================
# int fopen(char *a1, int a2)
# Opens file with name a1 with permissions a2.
# args:
# a1 = filepath
# a2 = permissions (0, 1, 2, 3, 4, 5 = r, w, a, r+, w+, a+)
# return:
# a0 = file descriptor
#================================================================
fopen:
li a0 c_openFile
ecall
#FOPEN_RETURN_HOOK
ret
#================================================================
# int fread(int a1, void *a2, size_t a3)
# Reads a3 bytes of the file into the buffer a2.
# args:
# a1 = file descriptor
# a2 = pointer to the buffer you want to write the read bytes to.
# a3 = Number of bytes to be read.
# return:
# a0 = Number of bytes actually read.
#================================================================
fread:
li a0 c_readFile
ecall
#FREAD_RETURN_HOOK
ret
#================================================================
# int fwrite(int a1, void *a2, size_t a3, size_t a4)
# Writes a3 * a4 bytes from the buffer in a2 to the file descriptor a1.
# args:
# a1 = file descriptor
# a2 = Buffer to read from
# a3 = Number of items to read from the buffer.
# a4 = Size of each item in the buffer.
# return:
# a0 = Number of elements written. If this is less than a3,
# it is either an error or EOF. You may also still need to flush the fd.
#================================================================
fwrite:
li a0 c_writeFile
ecall
#FWRITE_RETURN_HOOK
ret
#================================================================
# int fclose(int a1)
# Closes the file descriptor a1.
# args:
# a1 = file descriptor
# return:
# a0 = 0 on success, and EOF (-1) otherwise.
#================================================================
fclose:
li a0 c_closeFile
ecall
#FCLOSE_RETURN_HOOK
ret
#================================================================
# void noreturn exit2(int a1)
# Exits the program with error code a1.
# args:
# a1 = Exit code.
# return:
# This program does not return.
#================================================================
exit2:
li a0 c_exit2
ecall
ret
#================================================================
# int fflush(int a1)
# Flushes the data to the filesystem.
# args:
# a1 = file descriptor
# return:
# a0 = 0 on success, and EOF (-1) otherwise.
#================================================================
fflush:
li a0 c_fflush
ecall
ret
#================================================================
# int ferror(int a1)
# Returns a nonzero value if the file stream has errors, otherwise it returns 0.
# args:
# a1 = file descriptor
# return:
# a0 = Nonzero value if the file stream has errors. 0 otherwise.
#================================================================
ferror:
li a0 c_ferror
ecall
ret
#================================================================
# void print_hex(int a1)
#
# args:
# a1 = The word which will be printed as a hex value.
# return:
# void
#================================================================
print_hex:
li a0 c_printHex
ecall
ret
#================================================================
# void* malloc(int a0)
# Allocates heap memory and returns a pointer to it
# args:
# a0 is the # of bytes to allocate heap memory for
# return:
# a0 is the pointer to the allocated heap memory
#================================================================
malloc:
# Call to sbrk
mv a1 a0
li a0 0x3CC
addi a6 x0 1
ecall
#MALLOC_RETURN_HOOK
ret
#================================================================
# void free(int a0)
# Frees heap memory referenced by pointer
# args:
# a0 is the pointer to heap memory to free
# return:
# void
#================================================================
free:
mv a1 a0
li a0 0x3CC
addi a6 x0 4
ecall
ret
#================================================================
# int num_alloc_blocks(void)
# Returns the number of currently allocated blocks
# args:
# void
# return:
# a0 is the # of allocated blocks
#================================================================
num_alloc_blocks:
li a0, 0x3CC
li a6, 5
ecall
ret
print_num_alloc_blocks:
addi sp, sp, -4
sw ra 0(sp)
jal num_alloc_blocks
mv a1 a0
jal print_int
li a1 '\n'
jal print_char
lw ra 0(sp)
addi sp, sp, 4
ret
#================================================================
# void print_int_array(int* a0, int a1, int a2)
# Prints an integer array, with spaces between the elements
# args:
# a0 is the pointer to the start of the array
# a1 is the # of rows in the array
# a2 is the # of columns in the array
# return:
# void
#================================================================
print_int_array:
# Prologue
addi sp sp -24
sw s0 0(sp)
sw s1 4(sp)
sw s2 8(sp)
sw s3 12(sp)
sw s4 16(sp)
sw ra 20(sp)
# Save arguments
mv s0 a0
mv s1 a1
mv s2 a2
# Set outer loop index
li s3 0
outer_loop_start:
# Check outer loop condition
beq s3 s1 outer_loop_end
# Set inner loop index
li s4 0
inner_loop_start:
# Check inner loop condition
beq s4 s2 inner_loop_end
# t0 = row index * len(row) + column index
mul t0 s2 s3
add t0 t0 s4
slli t0 t0 2
# Load matrix element
add t0 t0 s0
lw t1 0(t0)
# Print matrix element
mv a1 t1
jal print_int
# Print whitespace
li a1 ' '
jal print_char
addi s4 s4 1
j inner_loop_start
inner_loop_end:
# Print newline
li a1 '\n'
jal print_char
addi s3 s3 1
j outer_loop_start
outer_loop_end:
# Epilogue
lw s0 0(sp)
lw s1 4(sp)
lw s2 8(sp)
lw s3 12(sp)
lw s4 16(sp)
lw ra 20(sp)
addi sp sp 24
ret
#================================================================
# void compare_int_array(int a0, int* a1, int* a2, int a3, char* a4)
# Compares two integer arrays element by element; on the first mismatch it
# prints the error message and the actual data, then exits with code a0
# args:
# a0 is the base exit code that will be used if an unequal element is found
# a1 is the pointer to the expected data
# a2 is the pointer to the actual data
# a3 is the number of elements in each array
# a4 is the pointer to the error message string
# return:
# void
#================================================================
compare_int_array:
# Prologue
addi sp sp -24
sw s0 0(sp)
sw s1 4(sp)
sw s2 8(sp)
sw s3 12(sp)
sw s4 16(sp)
sw ra 20(sp)
# save pointer to original array in s1
mv s1, a2
# t0: current element
mv t0 zero
loop_start:
# we are done once t0 >= a3
bge t0, a3, end
# t1 := *a1
lw t1, 0(a1)
# t2 := *a2
lw t2, 0(a2)
# if the values are different -> fail
bne t1, t2, fail
# go to next value
addi t0, t0, 1
addi a1, a1, 4
addi a2, a2, 4
j loop_start
fail:
# exit code: a0
mv s0, a0
# remember length
mv s2, a3
# print user supplied error message
mv a1, a4
jal print_str
# print actual data
mv a0, s1
li a1, 1
mv a2, s2
jal print_int_array
# exit with user defined error code
mv a1, s0
jal exit2
end:
# Epilogue
lw s0 0(sp)
lw s1 4(sp)
lw s2 8(sp)
lw s3 12(sp)
lw s4 16(sp)
lw ra 20(sp)
addi sp sp 24
ret
|
ciron-nine/CS61C-2020-FALL
| 1,040
|
proj2/src/relu.s
|
.globl relu
.text
# ==============================================================================
# FUNCTION: Performs an inplace element-wise ReLU on an array of ints
# Arguments:
# a0 (int*) is the pointer to the array
# a1 (int) is the # of elements in the array
# Returns:
# None
# Exceptions:
# - If the length of the vector is less than 1,
# this function terminates the program with error code 78.
# ==============================================================================
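# Equivalent C for the in-place loop below (a sketch):
# for (int i = 0; i < n; i++) if (a[i] < 0) a[i] = 0;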
relu:
li t0, 1
blt a1, t0, error
# Prologue
addi sp, sp, -16
sw s3, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
addi s0, zero, 4
mul s2, a1, s0 # s2 = array length in bytes
mv s0, a0 # s0 = moving element pointer
mv s1, zero # s1 = byte offset
loop_start:
lw s3, 0(s0)
bge s3, zero, loop_continue
mv s3, zero
sw s3, 0(s0)
loop_continue:
addi s1, s1, 4
addi s0, s0, 4
blt s1, s2, loop_start
loop_end:
# Epilogue
lw s3, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
addi sp, sp, 16
ret
error:
li a1, 78
j exit2
|
ciron-nine/CS61C-2020-FALL
| 1,385
|
proj2/src/dot.s
|
.globl dot
.text
# =======================================================
# FUNCTION: Dot product of 2 int vectors
# Arguments:
# a0 (int*) is the pointer to the start of v0
# a1 (int*) is the pointer to the start of v1
# a2 (int) is the length of the vectors
# a3 (int) is the stride of v0
# a4 (int) is the stride of v1
# Returns:
# a0 (int) is the dot product of v0 and v1
# Exceptions:
# - If the length of the vector is less than 1,
# this function terminates the program with error code 75.
# - If the stride of either vector is less than 1,
# this function terminates the program with error code 76.
# =======================================================
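# Equivalent C for the loop below (a sketch; strides are in elements):
# int dot(const int *v0, const int *v1, int n, int s0, int s1) {
#     int sum = 0;
#     for (int i = 0; i < n; i++) sum += v0[i * s0] * v1[i * s1];
#     return sum;
# }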
dot:
li t6, 1
blt a2, t6, error1
blt a3, t6, error2
blt a4, t6, error2
addi sp, sp, -20
sw s3, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
sw s4, 16(sp)
# Prologue
li s0, 4
mul a3, a3, s0 # convert v0 stride to a byte offset
mul a4, a4, s0 # convert v1 stride to a byte offset
mv s0, a0 # s0 = moving pointer into v0
mv s1, a1 # s1 = moving pointer into v1
mv s2, zero # s2 = element counter
mv a0, zero
loop_start:
lw s3, 0(s0)
lw s4, 0(s1)
mul s3, s3, s4
add a0, s3, a0
add s0, s0, a3
add s1, s1, a4
addi s2, s2, 1
blt s2, a2, loop_start
loop_end:
# Epilogue
lw s3, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
lw s4, 16(sp)
addi sp, sp, 20
ret
error1:
li a1, 75
j exit2
error2:
li a1, 76
j exit2
|
ciron-nine/CS61C-2020-FALL
| 1,231
|
proj2/src/argmax.s
|
.globl argmax
.text
# =================================================================
# FUNCTION: Given a int vector, return the index of the largest
# element. If there are multiple, return the one
# with the smallest index.
# Arguments:
# a0 (int*) is the pointer to the start of the vector
# a1 (int) is the # of elements in the vector
# Returns:
# a0 (int) is the first index of the largest element
# Exceptions:
# - If the length of the vector is less than 1,
# this function terminates the program with error code 77.
# =================================================================
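# Equivalent C for the loop below (a sketch; the strict > keeps the first maximum):
# int best = 0;
# for (int i = 1; i < n; i++) if (a[i] > a[best]) best = i;
# return best;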
argmax:
li t1, 1
blt a1, t1, error
addi sp, sp, -16
sw s3, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
# Prologue
mv s0, a0
mv s1, zero
mv s3, zero
mv s2, zero
loop_start:
li t1, 4
mul t2, s1, t1
add t2, t2, s0
lw t0, 0(t2)
li t3, 4
mul t1, s3, t3
add t1, t1, s0
lw t2, 0(t1)
bge t0, t2, loop_continue
mv s1, s3
loop_continue:
addi s3, s3, 1
blt s3, a1, loop_start
loop_end:
mv a0, s1
# Epilogue
lw s3, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
addi sp, sp, 16
ret
error:
li a1, 77
j exit2
|
ciron-nine/CS61C-2020-FALL
| 2,283
|
proj2/src/matmul.s
|
.globl matmul
.text
# =======================================================
# FUNCTION: Matrix Multiplication of 2 integer matrices
# d = matmul(m0, m1)
# Arguments:
# a0 (int*) is the pointer to the start of m0
# a1 (int) is the # of rows (height) of m0
# a2 (int) is the # of columns (width) of m0
# a3 (int*) is the pointer to the start of m1
# a4 (int) is the # of rows (height) of m1
# a5 (int) is the # of columns (width) of m1
# a6 (int*) is the pointer to the start of d
# Returns:
# None (void), sets d = matmul(m0, m1)
# Exceptions:
# Make sure to check in top to bottom order!
# - If the dimensions of m0 do not make sense,
# this function terminates the program with exit code 72.
# - If the dimensions of m1 do not make sense,
# this function terminates the program with exit code 73.
# - If the dimensions of m0 and m1 don't match,
# this function terminates the program with exit code 74.
# =======================================================
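# Equivalent C for the loops below (a sketch, built on dot() above):
# for (int i = 0; i < rows0; i++)
#     for (int j = 0; j < cols1; j++)
#         d[i * cols1 + j] = dot(m0 + i * cols0, /* row i of m0, stride 1 */
#                                m1 + j, /* column j of m1, stride cols1 */
#                                cols0, 1, cols1);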
matmul:
li t0, 1
blt a1, t0, error1
blt a2, t0, error1
blt a4, t0, error2
blt a5, t0, error2
bne a2, a4, error3
# Error checks
addi sp, sp, -40
sw s3, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
sw ra, 16(sp)
sw s4, 20(sp)
sw s5, 24(sp)
sw s6, 28(sp)
sw s7, 32(sp)
sw s8, 36(sp)
# Prologue
mv s0, zero
mv s1, zero
mv s8, a0 # Pointer to A
mv s7, a3 # Pointer to B
mv s6, a2 # length
mv s5, a6
mv s2, a1
mv s3, a5
outer_loop_start:
mv s4, s7
mv s0, zero
inner_loop_start:
mv a0, s8
mv a1, s4
mv a2, s6
li a3, 1
mv a4, s3
jal ra, dot
sw a0, 0(s5)
addi s5, s5, 4
addi s4, s4, 4
addi s0, s0, 1
blt s0, s3, inner_loop_start
add s8, s8, s6 # advance m0 pointer by one row (4 * width bytes, via four adds)
add s8, s8, s6
add s8, s8, s6
add s8, s8, s6
addi s1, s1, 1
blt s1, s2, outer_loop_start
inner_loop_end:
outer_loop_end:
lw s3, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
lw ra, 16(sp)
lw s4, 20(sp)
lw s5, 24(sp)
lw s6, 28(sp)
lw s7, 32(sp)
lw s8, 36(sp)
addi sp, sp, 40
# Epilogue
ret
error1:
li a1, 72
j exit2
error2:
li a1, 73
j exit2
error3:
li a1, 74
j exit2
|
ciron-nine/CS61C-2020-FALL
| 2,605
|
proj2/src/read_matrix.s
|
.globl read_matrix
.text
# ==============================================================================
# FUNCTION: Allocates memory and reads in a binary file as a matrix of integers
#
# FILE FORMAT:
# The first 8 bytes are two 4 byte ints representing the # of rows and columns
# in the matrix. Every 4 bytes afterwards is an element of the matrix in
# row-major order.
# Arguments:
# a0 (char*) is the pointer to string representing the filename
# a1 (int*) is a pointer to an integer, we will set it to the number of rows
# a2 (int*) is a pointer to an integer, we will set it to the number of columns
# Returns:
# a0 (int*) is the pointer to the matrix in memory
# Exceptions:
# - If malloc returns an error,
# this function terminates the program with error code 88.
# - If you receive an fopen error or eof,
# this function terminates the program with error code 90.
# - If you receive an fread error or eof,
# this function terminates the program with error code 91.
# - If you receive an fclose error or eof,
# this function terminates the program with error code 92.
# ==============================================================================
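# A C stdio sketch of the read path (illustrative only, not the ecall API used below):
# fread(header, 4, 2, f); /* header[0] = rows, header[1] = cols */
# int *m = malloc(4 * rows * cols);
# fread(m, 4, rows * cols, f);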
read_matrix:
addi sp, sp, -40
sw s3, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
sw ra, 16(sp)
sw s4, 20(sp)
sw s5, 24(sp)
sw s6, 28(sp)
sw s7, 32(sp)
sw s8, 36(sp)
# Prologue
mv s0, a1 # pointer to rows output
mv s1, a2 # pointer to cols output
mv a1, a0
li a2, 0
jal ra, fopen
li t0, -1
beq a0, t0, openerror
mv s2, a0 # file descriptor
li a0, 8
jal ra, malloc
beq a0, zero, mallocerror
mv s4, a0 # pointer to 8-byte header buffer
li a3, 8
mv a2, a0
mv a1, s2
jal ra, fread
li t0, 8
bne a0, t0, readerror
lw t0, 0(s4)
sw t0, 0(s0)
lw t0, 4(s4)
sw t0, 0(s1)
mv a0, s4
jal ra, free
lw t0, 0(s0)
lw t1, 0(s1)
mul s3, t0, t1 #size of mat
li t0, 4
mul s3, s3, t0
mv a0, s3
jal ra, malloc
beq a0, zero, mallocerror
mv s4, a0 # result matrix pointer
mv a2, a0
mv a3, s3
mv a1, s2
jal ra, fread
bne a0, s3, readerror
mv a1, s2
jal ra, fclose
li t0, -1
beq a0, t0, closeerror
mv a0, s4
# Epilogue
lw s3, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
lw ra, 16(sp)
lw s4, 20(sp)
lw s5, 24(sp)
lw s6, 28(sp)
lw s7, 32(sp)
lw s8, 36(sp)
addi sp, sp, 40
ret
openerror:
li a1, 90
j exit2
mallocerror:
li a1, 88
j exit2
readerror:
li a1, 91
j exit2
closeerror:
li a1, 92
j exit2
|
ciron-nine/CS61C-2020-FALL
| 3,808
|
proj2/src/classify.s
|
.globl classify
.text
classify:
# =====================================
# COMMAND LINE ARGUMENTS
# =====================================
# Args:
# a0 (int) argc
# a1 (char**) argv
# a2 (int) print_classification, if this is zero,
# you should print the classification. Otherwise,
# this function should not print ANYTHING.
# Returns:
# a0 (int) Classification
# Exceptions:
# - If there are an incorrect number of command line args,
# this function terminates the program with exit code 89.
# - If malloc fails, this function terminates the program with exit code 88.
#
# Usage:
# main.s <M0_PATH> <M1_PATH> <INPUT_PATH> <OUTPUT_PATH>
li t0, 5
bne a0, t0, incargs
#prologue
addi sp, sp, -40
sw s3, 0(sp)
sw s0, 4(sp)
sw s1, 8(sp)
sw s2, 12(sp)
sw ra, 16(sp)
sw s4, 20(sp)
sw s5, 24(sp)
sw s6, 28(sp)
sw s7, 32(sp)
sw s8, 36(sp)
# =====================================
# LOAD MATRICES
# =====================================
mv s0, a1 # argv
mv s1, a2 # print_classification flag
# Load pretrained m0
li a0, 8
jal ra, malloc
beq a0, zero, mallocerror
mv s2, a0 # m0 row col pointer
mv a1, s2
lw a0, 4(s0)
addi a2, s2, 4
jal ra, read_matrix
mv s3, a0 # m0 matrix pointer
# Load pretrained m1
li a0, 8
jal ra, malloc
beq a0, zero, mallocerror
mv s4, a0 # m1 row/col header pointer
mv a1, s4
lw a0, 8(s0)
addi a2, s4, 4
jal ra, read_matrix
mv s5, a0 # m1 matrix pointer
# Load input matrix
li a0, 8
jal ra, malloc
beq a0, zero, mallocerror
mv s6, a0 # input row/col header pointer
mv a1, s6
lw a0, 12(s0)
addi a2, s6, 4
jal ra, read_matrix
mv s7, a0 # input matrix pointer
# =====================================
# RUN LAYERS
# =====================================
# 1. LINEAR LAYER: m0 * input
# 2. NONLINEAR LAYER: ReLU(m0 * input)
# 3. LINEAR LAYER: m1 * ReLU(m0 * input)
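# In C terms, a sketch of the sequence implemented below:
# hidden = matmul(m0, input); relu(hidden); scores = matmul(m1, hidden);
# label = argmax(scores);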
li t0, 4
lw t1, 0(s2)
lw t2, 4(s6)
mul t1, t0, t1
mul t0, t1, t2
mv a0, t0
jal ra, malloc
beq a0, zero, mallocerror
mv a6, a0
mv s8, a0
lw a1, 0(s2)
lw a2, 4(s2)
mv a0, s3
mv a3, s7
lw a4, 0(s6)
lw a5, 4(s6)
jal ra, matmul
lw t1, 0(s2)
lw t2, 4(s6)
mul t0, t1, t2
mv a0, s8
mv a1, t0
jal ra, relu
mv a0, s3
jal ra, free
mv a0, s7
jal ra, free
lw t0, 0(s4)
lw t1, 4(s6)
mul t0, t0, t1
li t1, 4
mul t0, t0, t1
mv a0, t0
jal ra, malloc
beq a0, zero, mallocerror
mv s3, a0
mv a0, s5
lw a1, 0(s4)
lw a2, 4(s4)
mv a3, s8
lw a4, 0(s2)
lw a5, 4(s6)
mv a6, s3
jal ra, matmul
# =====================================
# WRITE OUTPUT
# =====================================
# Write output matrix
lw a0, 16(s0)
lw a2, 0(s4)
lw a3, 4(s6)
mv a1, s3
jal ra, write_matrix
# =====================================
# CALCULATE CLASSIFICATION/LABEL
# =====================================
# Call argmax (always, since we must return the classification either way)
mv a0, s3
lw t0, 0(s4)
lw t1, 4(s6)
mul a1, t0, t1
jal ra, argmax
mv s0, a0 # save the classification; printing below would clobber a0
bne s1, zero, exitt # skip printing when print_classification != 0
# Print classification
mv a1, a0
jal ra, print_int
# Print newline afterwards for clarity
li a1, 10
jal ra, print_char
exitt:
mv a0, s0 # return the classification
# Epilogue
lw s3, 0(sp)
lw s0, 4(sp)
lw s1, 8(sp)
lw s2, 12(sp)
lw ra, 16(sp)
lw s4, 20(sp)
lw s5, 24(sp)
lw s6, 28(sp)
lw s7, 32(sp)
lw s8, 36(sp)
addi sp, sp, 40
ret
incargs:
li a1, 89
j exit2
mallocerror:
li a1, 88
j exit2
|
cjsjz/rcore-os
| 1,640
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
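# Assumed TrapContext layout, inferred from the offsets used in this file:
# 0..31*8: x0..x31, 32*8: sstatus, 33*8: sepc,
# 34*8: kernel_satp, 35*8: kernel_sp, 36*8: trap_handler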
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
cliffeh/rvem
| 1,744
|
tests/data/fib.s
|
.data
n:
.word 42
.text
.globl _start
_start:
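# A C sketch of the iterative Fibonacci computed below (n is the .word above):
# int a = 0, b = 1;
# for (int i = 1; i < n; i++) { int t = a + b; a = b; b = t; }
# /* prints b, i.e. fib(n); n == 0 and n == 1 are special-cased below */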
li x2, 0 # Used to determine if n (x7) equals 0
li x3, 1 # Used to determine if n (x7) equals 1
li x5, 0 # First number
li x6, 1 # Second number
lw x7, n # Limit
li x8, 1 # Counter
beq x7, x2, DO # If n == 0 then jump to DO (which should print 0). Implements f(0) = 0
beq x7, x3, WRITE # if n == 1 then jump to WRITE (Which should print 1). Implements f(1) = 1
LOOP:
beq x8, x7, EXIT # Compares the counter x8 (which starts at 1) to n (limit). If x8 == x7, jump to EXIT
add x4, x5, x6 # Add x5 to x6 and store in x4
ori x5, x6, 0 # Assign the second number to my first number
ori x6, x4, 0 # Assign the sum of x5 and x6 to my second number
addi x8, x8, 1 # Add 1 to my counter
j LOOP # Jump to loop
EXIT:
li x17, 1 # Load constant 1 to x17
add x10,x4,x0 # Move x4 (which contains the result of the loop above) to x10
ecall # Issue a system call which prints an integer (because of the 1 in x17)
li x17, 10
ecall # Exit the program (because of the 10 in x17)
DO:
li x4, 0 # Load 0 into x4 (copied to x10 below, which the print syscall uses) and print
add x10,x4,x0
li x17, 1
ecall
li x17, 10
ecall
WRITE:
li x4, 1 # Load 1 into x4 and print it via x10
add x10,x4,x0
li x17,1
ecall
li x17, 10
ecall
|
cliffeh/rvem
| 2,664
|
tests/data/complexMul.s
|
# This example demonstrates an implementation of the multiplication of two
# complex numbers z = 1 + 3i, w = 5 + 4i.
.data
aa: .word 1 # Real part of z
bb: .word 3 # Imag. part of z
cc: .word 5 # Real part of w
dd: .word 4 # Imag part of w
str: .string " + i* "
.text
.globl _start
_start:
lw a0, aa
lw a1, bb
lw a2, cc
lw a3, dd
# Do complex multiplication of numbers a0-a3
jal complexMul
mv t0, a1 # Move imaginary value to t0
mv a0, a0 # Real value is already in a0 (this move is a no-op)
# Print real value (in a0) by setting ecall argument to 1
li a7, 1
ecall
# Print delimiter string (pointer in a0) by setting ecall argument to 4
la a0, str
li a7, 4
ecall
# Print imaginary value (in a0) by setting ecall argument to 1
mv a0, t0 # Move imaginary value to a0
li a7, 1
ecall
# Exit program
li a7, 10
ecall
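# myMult below is a 32-bit shift-and-add multiplier. A C sketch of the algorithm:
# int myMult(int a, int b) {
#     int p = 0;
#     for (int i = 0; i < 32; i++) { if (b & 1) p += a; a <<= 1; b >>= 1; }
#     return p;
# }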
myMult:
li t0, 32 # Iteration variable
li t3, 0 # initialize temporary product register to 0
start:
mv t1, a1 # move multiplier to temporary register
andi t1, t1, 1 # mask first bit
beq t1, x0, shift
add t3, t3, a0
shift:
slli a0, a0, 1
srai a1, a1, 1 # make an arithmetic right shift for signed multiplication
addi t0, t0, -1 # decrement loop index
bnez t0, start # branch if loop index is not 0
mv a0, t3 # move final product to result register
jr x1
complexMul:
# Place the 4 input arguments and return address on the stack
addi sp, sp, -28
sw x0, 24(sp) # tmp. res 2
sw x0, 20(sp) # tmp. res 1
sw ra, 16(sp) # return address
sw a0, 12(sp) # a
sw a1, 8(sp) # b
sw a2, 4(sp) # c
sw a3, 0(sp) # d
# (a + ib)(c + id) = (ac − bd) + i(ad + bc)
# Step 1: a*c
mv a1, a2 # Move C from a2 to a1
jal myMult
sw a0, 20(sp) # push onto tmp. res 1
# step 2: b*d
lw a0, 8(sp)
lw a1, 0(sp)
jal myMult
# step 3: (ac − bd)
lw t0, 20(sp) # Reload a*c from stack
sub t2, t0, a0 # t2 contains real part of multiplication
# push (ac − bd) onto tmp. res 1 from stack
sw t2, 20(sp)
# Step 4: a*d
lw a0, 12(sp)
lw a1, 0(sp)
jal myMult
sw a0, 24(sp) # store a*d in tmp. res 2
# step 5: b*c
lw a0, 8(sp)
lw a1, 4(sp)
jal myMult
mv a1, a0 # moving result to a1 saves us 1 operation later on
# step 6: (ad + bc)
lw t0, 24(sp) # Reload a*d from stack
add a1, t0, a1 # a1 contains imag part of multiplication
lw a0, 20(sp) # Load real result from tmp. res 1
lw ra, 16(sp) # Reload return address from stack
addi sp, sp, 28 # Restore stack pointer
jr x1
|
clorf6/ACore
| 4,438
|
kernel/src/link_app.S
|
.align 3
.section .data
.global _num_app
_num_app:
.quad 19
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_5_start
.quad app_6_start
.quad app_7_start
.quad app_8_start
.quad app_9_start
.quad app_10_start
.quad app_11_start
.quad app_12_start
.quad app_13_start
.quad app_14_start
.quad app_15_start
.quad app_16_start
.quad app_17_start
.quad app_18_start
.quad app_18_end
.global _app_names
_app_names:
.string "exit"
.string "fantastic_text"
.string "forkexec"
.string "forktest"
.string "forktest2"
.string "forktest_simple"
.string "forktree"
.string "hello_world"
.string "initproc"
.string "manager"
.string "matrix"
.string "priority_test"
.string "sleep"
.string "sleep_simple"
.string "stack_overflow"
.string "user_shell"
.string "usertests"
.string "usertests-simple"
.string "yield"
.section .data
.global app_0_start
.global app_0_end
.align 3
app_0_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/exit"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
.align 3
app_1_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/fantastic_text"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
.align 3
app_2_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forkexec"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
.align 3
app_3_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
.align 3
app_4_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest2"
app_4_end:
.section .data
.global app_5_start
.global app_5_end
.align 3
app_5_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest_simple"
app_5_end:
.section .data
.global app_6_start
.global app_6_end
.align 3
app_6_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktree"
app_6_end:
.section .data
.global app_7_start
.global app_7_end
.align 3
app_7_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/hello_world"
app_7_end:
.section .data
.global app_8_start
.global app_8_end
.align 3
app_8_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/initproc"
app_8_end:
.section .data
.global app_9_start
.global app_9_end
.align 3
app_9_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/manager"
app_9_end:
.section .data
.global app_10_start
.global app_10_end
.align 3
app_10_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/matrix"
app_10_end:
.section .data
.global app_11_start
.global app_11_end
.align 3
app_11_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/priority_test"
app_11_end:
.section .data
.global app_12_start
.global app_12_end
.align 3
app_12_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep"
app_12_end:
.section .data
.global app_13_start
.global app_13_end
.align 3
app_13_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep_simple"
app_13_end:
.section .data
.global app_14_start
.global app_14_end
.align 3
app_14_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/stack_overflow"
app_14_end:
.section .data
.global app_15_start
.global app_15_end
.align 3
app_15_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/user_shell"
app_15_end:
.section .data
.global app_16_start
.global app_16_end
.align 3
app_16_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests"
app_16_end:
.section .data
.global app_17_start
.global app_17_end
.align 3
app_17_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests-simple"
app_17_end:
.section .data
.global app_18_start
.global app_18_end
.align 3
app_18_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/yield"
app_18_end:
|
cloudfstrife/rCore
| 1,589
|
01-BatchOS/os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
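# Assumed TrapContext layout (34 * 8 bytes), inferred from the offsets below:
# 0..31*8: x0..x31, 32*8: sstatus, 33*8: sepc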
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
__restore:
# case1: start running app by __restore
# case2: back to U after handling trap
mv sp, a0
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
|
clexp/Primary_Care_Load_Management_Tool
| 15,124
|
streamlit_env_py312/lib/python3.12/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/atomic_support.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd1__TBB_full_fence#
.global __TBB_machine_fetchadd1__TBB_full_fence#
__TBB_machine_fetchadd1__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd1acquire
}
.endp __TBB_machine_fetchadd1__TBB_full_fence#
.proc __TBB_machine_fetchadd1acquire#
.global __TBB_machine_fetchadd1acquire#
__TBB_machine_fetchadd1acquire:
ld1 r9=[r32]
;;
Retry_1acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore1__TBB_full_fence#
.global __TBB_machine_fetchstore1__TBB_full_fence#
__TBB_machine_fetchstore1__TBB_full_fence:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1__TBB_full_fence#
.proc __TBB_machine_fetchstore1acquire#
.global __TBB_machine_fetchstore1acquire#
__TBB_machine_fetchstore1acquire:
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp1__TBB_full_fence#
.global __TBB_machine_cmpswp1__TBB_full_fence#
__TBB_machine_cmpswp1__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp1acquire
}
.endp __TBB_machine_cmpswp1__TBB_full_fence#
.proc __TBB_machine_cmpswp1acquire#
.global __TBB_machine_cmpswp1acquire#
__TBB_machine_cmpswp1acquire:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd2__TBB_full_fence#
.global __TBB_machine_fetchadd2__TBB_full_fence#
__TBB_machine_fetchadd2__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd2acquire
}
.endp __TBB_machine_fetchadd2__TBB_full_fence#
.proc __TBB_machine_fetchadd2acquire#
.global __TBB_machine_fetchadd2acquire#
__TBB_machine_fetchadd2acquire:
ld2 r9=[r32]
;;
Retry_2acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg2.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_2acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore2__TBB_full_fence#
.global __TBB_machine_fetchstore2__TBB_full_fence#
__TBB_machine_fetchstore2__TBB_full_fence:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2__TBB_full_fence#
.proc __TBB_machine_fetchstore2acquire#
.global __TBB_machine_fetchstore2acquire#
__TBB_machine_fetchstore2acquire:
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp2__TBB_full_fence#
.global __TBB_machine_cmpswp2__TBB_full_fence#
__TBB_machine_cmpswp2__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp2acquire
}
.endp __TBB_machine_cmpswp2__TBB_full_fence#
.proc __TBB_machine_cmpswp2acquire#
.global __TBB_machine_cmpswp2acquire#
__TBB_machine_cmpswp2acquire:
zxt2 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg2.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd4__TBB_full_fence#
.global __TBB_machine_fetchadd4__TBB_full_fence#
__TBB_machine_fetchadd4__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd4acquire
}
.endp __TBB_machine_fetchadd4__TBB_full_fence#
.proc __TBB_machine_fetchadd4acquire#
.global __TBB_machine_fetchadd4acquire#
__TBB_machine_fetchadd4acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_4acquire
(p8) br.cond.dpnt Dec_4acquire
;;
ld4 r9=[r32]
;;
Retry_4acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg4.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_4acquire
br.ret.sptk.many b0
Inc_4acquire:
fetchadd4.acq r8=[r32],1
br.ret.sptk.many b0
Dec_4acquire:
fetchadd4.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore4__TBB_full_fence#
.global __TBB_machine_fetchstore4__TBB_full_fence#
__TBB_machine_fetchstore4__TBB_full_fence:
mf
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4__TBB_full_fence#
.proc __TBB_machine_fetchstore4acquire#
.global __TBB_machine_fetchstore4acquire#
__TBB_machine_fetchstore4acquire:
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp4__TBB_full_fence#
.global __TBB_machine_cmpswp4__TBB_full_fence#
__TBB_machine_cmpswp4__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp4acquire
}
.endp __TBB_machine_cmpswp4__TBB_full_fence#
.proc __TBB_machine_cmpswp4acquire#
.global __TBB_machine_cmpswp4acquire#
__TBB_machine_cmpswp4acquire:
zxt4 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg4.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd8__TBB_full_fence#
.global __TBB_machine_fetchadd8__TBB_full_fence#
__TBB_machine_fetchadd8__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd8acquire
}
.endp __TBB_machine_fetchadd8__TBB_full_fence#
.proc __TBB_machine_fetchadd8acquire#
.global __TBB_machine_fetchadd8acquire#
__TBB_machine_fetchadd8acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_8acquire
(p8) br.cond.dpnt Dec_8acquire
;;
ld8 r9=[r32]
;;
Retry_8acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg8.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_8acquire
br.ret.sptk.many b0
Inc_8acquire:
fetchadd8.acq r8=[r32],1
br.ret.sptk.many b0
Dec_8acquire:
fetchadd8.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore8__TBB_full_fence#
.global __TBB_machine_fetchstore8__TBB_full_fence#
__TBB_machine_fetchstore8__TBB_full_fence:
mf
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8__TBB_full_fence#
.proc __TBB_machine_fetchstore8acquire#
.global __TBB_machine_fetchstore8acquire#
__TBB_machine_fetchstore8acquire:
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp8__TBB_full_fence#
.global __TBB_machine_cmpswp8__TBB_full_fence#
__TBB_machine_cmpswp8__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp8acquire
}
.endp __TBB_machine_cmpswp8__TBB_full_fence#
.proc __TBB_machine_cmpswp8acquire#
.global __TBB_machine_cmpswp8acquire#
__TBB_machine_cmpswp8acquire:
mov ar.ccv=r34
;;
cmpxchg8.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd1release#
.global __TBB_machine_fetchadd1release#
__TBB_machine_fetchadd1release:
ld1 r9=[r32]
;;
Retry_1release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore1release#
.global __TBB_machine_fetchstore1release#
__TBB_machine_fetchstore1release:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp1release#
.global __TBB_machine_cmpswp1release#
__TBB_machine_cmpswp1release:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd2release#
.global __TBB_machine_fetchadd2release#
__TBB_machine_fetchadd2release:
ld2 r9=[r32]
;;
Retry_2release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg2.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_2release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore2release#
.global __TBB_machine_fetchstore2release#
__TBB_machine_fetchstore2release:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp2release#
.global __TBB_machine_cmpswp2release#
__TBB_machine_cmpswp2release:
zxt2 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg2.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd4release#
.global __TBB_machine_fetchadd4release#
__TBB_machine_fetchadd4release:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_4release
(p8) br.cond.dpnt Dec_4release
;;
ld4 r9=[r32]
;;
Retry_4release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg4.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_4release
br.ret.sptk.many b0
Inc_4release:
fetchadd4.rel r8=[r32],1
br.ret.sptk.many b0
Dec_4release:
fetchadd4.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore4release#
.global __TBB_machine_fetchstore4release#
__TBB_machine_fetchstore4release:
mf
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp4release#
.global __TBB_machine_cmpswp4release#
__TBB_machine_cmpswp4release:
zxt4 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg4.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd8release#
.global __TBB_machine_fetchadd8release#
__TBB_machine_fetchadd8release:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_8release
(p8) br.cond.dpnt Dec_8release
;;
ld8 r9=[r32]
;;
Retry_8release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg8.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_8release
br.ret.sptk.many b0
Inc_8release:
fetchadd8.rel r8=[r32],1
br.ret.sptk.many b0
Dec_8release:
fetchadd8.rel r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore8release#
.global __TBB_machine_fetchstore8release#
__TBB_machine_fetchstore8release:
mf
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp8release#
.global __TBB_machine_cmpswp8release#
__TBB_machine_cmpswp8release:
mov ar.ccv=r34
;;
cmpxchg8.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8release#
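// A note on the release-ordered group above: cmpxchg and fetchadd take a .rel
// completer directly, but ia64's xchg instruction always carries acquire
// semantics and has no release form, which is presumably why each
// fetchstore*release issues an explicit mf (memory fence) before the xchg.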
|
clexp/Primary_Care_Load_Management_Tool
| 1,304
|
streamlit_env_py312/lib/python3.12/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/log2.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
.section .text
.align 16
// unsigned long __TBB_machine_lg( unsigned long x );
// r32 = x
.proc __TBB_machine_lg#
.global __TBB_machine_lg#
__TBB_machine_lg:
shr r16=r32,1 // .x
;;
shr r17=r32,2 // ..x
or r32=r32,r16 // xx
;;
shr r16=r32,3 // ...xx
or r32=r32,r17 // xxx
;;
shr r17=r32,5 // .....xxx
or r32=r32,r16 // xxxxx
;;
shr r16=r32,8 // ........xxxxx
or r32=r32,r17 // xxxxxxxx
;;
shr r17=r32,13
or r32=r32,r16 // 13x
;;
shr r16=r32,21
or r32=r32,r17 // 21x
;;
shr r17=r32,34
or r32=r32,r16 // 34x
;;
shr r16=r32,55
or r32=r32,r17 // 55x
;;
or r32=r32,r16 // 64x
;;
popcnt r8=r32
;;
add r8=-1,r8
br.ret.sptk.many b0
.endp __TBB_machine_lg#
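// The routine above computes floor(log2(x)): the shift/or cascade smears the
// highest set bit into every lower bit position (the Fibonacci-like shift
// amounts 1, 2, 3, 5, 8, 13, 21, 34, 55 cover all 64 bits), then popcnt counts
// the resulting ones and the add of -1 turns that count into a bit index.
// A minimal C sketch of the same idea, assuming a 64-bit unsigned long,
// x != 0, and GCC's __builtin_popcountll:
//
//   unsigned long lg(unsigned long x)
//   {
//       x |= x >> 1;  x |= x >> 2;  x |= x >> 4;   /* smear the top bit down */
//       x |= x >> 8;  x |= x >> 16; x |= x >> 32;
//       return __builtin_popcountll(x) - 1;        /* count -> bit index */
//   }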
|
clexp/Primary_Care_Load_Management_Tool
| 1,270
|
streamlit_env_py312/lib/python3.12/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/lock_byte.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Support for class TinyLock
.section .text
.align 16
// unsigned int __TBB_machine_trylockbyte( byte& flag );
// r32 = address of flag
.proc __TBB_machine_trylockbyte#
.global __TBB_machine_trylockbyte#
ADDRESS_OF_FLAG=r32
RETCODE=r8
FLAG=r9
BUSY=r10
SCRATCH=r11
__TBB_machine_trylockbyte:
ld1.acq FLAG=[ADDRESS_OF_FLAG]
mov BUSY=1
mov RETCODE=0
;;
cmp.ne p6,p0=0,FLAG
mov ar.ccv=r0
(p6) br.ret.sptk.many b0
;;
cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv // Try to acquire lock
;;
cmp.eq p6,p0=0,SCRATCH
;;
(p6) mov RETCODE=1
br.ret.sptk.many b0
.endp __TBB_machine_trylockbyte#
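// A hedged C sketch of the try-lock above (illustrative name): read the flag
// with acquire semantics, give up immediately if it is already set, otherwise
// attempt an acquire-ordered 0 -> 1 compare-and-swap and report success:
//
//   unsigned trylockbyte(volatile unsigned char *flag)
//   {
//       unsigned char expected = 0;
//       if (*flag != 0)
//           return 0;                 /* already held; RETCODE stays 0 */
//       return __atomic_compare_exchange_n(flag, &expected, 1, 0,
//                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
//   }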
|
clexp/Primary_Care_Load_Management_Tool
| 2,687
|
streamlit_env_py312/lib/python3.12/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/ia64_misc.s
|
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// RSE backing store pointer retrieval
.section .text
.align 16
.proc __TBB_get_bsp#
.global __TBB_get_bsp#
__TBB_get_bsp:
mov r8=ar.bsp
br.ret.sptk.many b0
.endp __TBB_get_bsp#
.section .text
.align 16
.proc __TBB_machine_load8_relaxed#
.global __TBB_machine_load8_relaxed#
__TBB_machine_load8_relaxed:
ld8 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load8_relaxed#
.section .text
.align 16
.proc __TBB_machine_store8_relaxed#
.global __TBB_machine_store8_relaxed#
__TBB_machine_store8_relaxed:
st8 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store8_relaxed#
.section .text
.align 16
.proc __TBB_machine_load4_relaxed#
.global __TBB_machine_load4_relaxed#
__TBB_machine_load4_relaxed:
ld4 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load4_relaxed#
.section .text
.align 16
.proc __TBB_machine_store4_relaxed#
.global __TBB_machine_store4_relaxed#
__TBB_machine_store4_relaxed:
st4 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store4_relaxed#
.section .text
.align 16
.proc __TBB_machine_load2_relaxed#
.global __TBB_machine_load2_relaxed#
__TBB_machine_load2_relaxed:
ld2 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load2_relaxed#
.section .text
.align 16
.proc __TBB_machine_store2_relaxed#
.global __TBB_machine_store2_relaxed#
__TBB_machine_store2_relaxed:
st2 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store2_relaxed#
.section .text
.align 16
.proc __TBB_machine_load1_relaxed#
.global __TBB_machine_load1_relaxed#
__TBB_machine_load1_relaxed:
ld1 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load1_relaxed#
.section .text
.align 16
.proc __TBB_machine_store1_relaxed#
.global __TBB_machine_store1_relaxed#
__TBB_machine_store1_relaxed:
st1 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store1_relaxed#
|
Code-Allergy/rust-kern
| 1,831
|
bootloader/src/boot.S
|
.arm
/* local mode/flag definitions (values match the ARM CPSR mode encodings) */
#define MODE_Usr 0x10 /* thread-mode, unprivileged */
#define MODE_FIQ 0x11 /* FIQ-mode (always privileged) */
#define MODE_IRQ 0x12 /* IRQ-mode (always privileged) */
#define MODE_Supervisor 0x13 /* SVC-mode (always privileged) */
#define MODE_Abort 0x17 /* Abort-mode (always privileged) */
#define MODE_Undef 0x1B /* Undefined-mode (always privileged) */
#define MODE_System 0x1F /* thread-mode, privileged */
#define I_F_BIT 0xC0 /* I and F bits for CPSR register */
#define STACK_SIZE 0x1000 /* 4KB */
.global _init
.section .init
_init:
/* Set up stacks for different CPU modes */
/* Enter IRQ mode */
ldr r0, =__StackStart
msr cpsr_c, #(MODE_IRQ|I_F_BIT)
mov sp, r0
sub r0, r0, #STACK_SIZE
/* Enter FIQ mode */
msr cpsr_c, #(MODE_FIQ|I_F_BIT)
mov sp, r0
sub r0, r0, #STACK_SIZE
/* Enter Abort mode */
msr cpsr_c, #(MODE_Abort|I_F_BIT)
mov sp, r0
sub r0, r0, #STACK_SIZE
/* Enter Undefined mode */
msr cpsr_c, #(MODE_Undef|I_F_BIT)
mov sp, r0
sub r0, r0, #STACK_SIZE
/* Enter Supervisor mode */
msr cpsr_c, #(MODE_Supervisor|I_F_BIT)
mov sp, r0
sub r0, r0, #STACK_SIZE
/* Enter System mode */
msr cpsr_c, #(MODE_System|I_F_BIT)
mov sp, r0
/* cortex v8, page 3-45 - cache and branch predictor */
mrc p15, 0, r0, c1, c0, 0 /* read system control register */
/* data cache, strict alignment, MMU (MMU = 1 current breaks, idk) */
orr r0, r0, #0b100
/* Instruction cache, Program flow predict */
orr r0, r0, #(0b11 << 11)
/* enable arm exceptions, clear the 30th bit in r0 */
bic r0, r0, #(1 << 30)
mcr p15, 0, r0, c1, c0, 0 /* system control register */
/* L2 cache enable */
mrc p15, 0, r0, c1, c0, 1
orr r0, r0, #0b10
mcr p15, 0, r0, c1, c0, 1 /* auxiliary control reg */
bl rust_main
idle:
wfi /* wait for interrupt */
b idle
|
codegasms/ctf
| 9,632
|
ShunyaCTF-2024/Returning to Winning/c.s
|
c: file format elf64-x86-64
Disassembly of section .init:
0000000000401000 <.init>:
401000: 48 83 ec 08 sub $0x8,%rsp
401004: 48 8b 05 d5 2f 00 00 mov 0x2fd5(%rip),%rax # 403fe0 <fopen@plt+0x2f40>
40100b: 48 85 c0 test %rax,%rax
40100e: 74 02 je 401012 <putchar@plt-0x1e>
401010: ff d0 call *%rax
401012: 48 83 c4 08 add $0x8,%rsp
401016: c3 ret
Disassembly of section .plt:
0000000000401020 <putchar@plt-0x10>:
401020: ff 35 ca 2f 00 00 push 0x2fca(%rip) # 403ff0 <fopen@plt+0x2f50>
401026: ff 25 cc 2f 00 00 jmp *0x2fcc(%rip) # 403ff8 <fopen@plt+0x2f58>
40102c: 0f 1f 40 00 nopl 0x0(%rax)
0000000000401030 <putchar@plt>:
401030: ff 25 ca 2f 00 00 jmp *0x2fca(%rip) # 404000 <fopen@plt+0x2f60>
401036: 68 00 00 00 00 push $0x0
40103b: e9 e0 ff ff ff jmp 401020 <putchar@plt-0x10>
0000000000401040 <puts@plt>:
401040: ff 25 c2 2f 00 00 jmp *0x2fc2(%rip) # 404008 <fopen@plt+0x2f68>
401046: 68 01 00 00 00 push $0x1
40104b: e9 d0 ff ff ff jmp 401020 <putchar@plt-0x10>
0000000000401050 <fclose@plt>:
401050: ff 25 ba 2f 00 00 jmp *0x2fba(%rip) # 404010 <fopen@plt+0x2f70>
401056: 68 02 00 00 00 push $0x2
40105b: e9 c0 ff ff ff jmp 401020 <putchar@plt-0x10>
0000000000401060 <printf@plt>:
401060: ff 25 b2 2f 00 00 jmp *0x2fb2(%rip) # 404018 <fopen@plt+0x2f78>
401066: 68 03 00 00 00 push $0x3
40106b: e9 b0 ff ff ff jmp 401020 <putchar@plt-0x10>
0000000000401070 <fgetc@plt>:
401070: ff 25 aa 2f 00 00 jmp *0x2faa(%rip) # 404020 <fopen@plt+0x2f80>
401076: 68 04 00 00 00 push $0x4
40107b: e9 a0 ff ff ff jmp 401020 <putchar@plt-0x10>
0000000000401080 <gets@plt>:
401080: ff 25 a2 2f 00 00 jmp *0x2fa2(%rip) # 404028 <fopen@plt+0x2f88>
401086: 68 05 00 00 00 push $0x5
40108b: e9 90 ff ff ff jmp 401020 <putchar@plt-0x10>
0000000000401090 <setvbuf@plt>:
401090: ff 25 9a 2f 00 00 jmp *0x2f9a(%rip) # 404030 <fopen@plt+0x2f90>
401096: 68 06 00 00 00 push $0x6
40109b: e9 80 ff ff ff jmp 401020 <putchar@plt-0x10>
00000000004010a0 <fopen@plt>:
4010a0: ff 25 92 2f 00 00 jmp *0x2f92(%rip) # 404038 <fopen@plt+0x2f98>
4010a6: 68 07 00 00 00 push $0x7
4010ab: e9 70 ff ff ff jmp 401020 <putchar@plt-0x10>
Disassembly of section .text:
00000000004010b0 <.text>:
4010b0: 31 ed xor %ebp,%ebp
4010b2: 49 89 d1 mov %rdx,%r9
4010b5: 5e pop %rsi
4010b6: 48 89 e2 mov %rsp,%rdx
4010b9: 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
4010bd: 50 push %rax
4010be: 54 push %rsp
4010bf: 45 31 c0 xor %r8d,%r8d
4010c2: 31 c9 xor %ecx,%ecx
4010c4: 48 c7 c7 6f 12 40 00 mov $0x40126f,%rdi
4010cb: ff 15 07 2f 00 00 call *0x2f07(%rip) # 403fd8 <fopen@plt+0x2f38>
4010d1: f4 hlt
4010d2: 66 2e 0f 1f 84 00 00 cs nopw 0x0(%rax,%rax,1)
4010d9: 00 00 00
4010dc: 0f 1f 40 00 nopl 0x0(%rax)
4010e0: c3 ret
4010e1: 66 2e 0f 1f 84 00 00 cs nopw 0x0(%rax,%rax,1)
4010e8: 00 00 00
4010eb: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
4010f0: b8 50 40 40 00 mov $0x404050,%eax
4010f5: 48 3d 50 40 40 00 cmp $0x404050,%rax
4010fb: 74 13 je 401110 <fopen@plt+0x70>
4010fd: b8 00 00 00 00 mov $0x0,%eax
401102: 48 85 c0 test %rax,%rax
401105: 74 09 je 401110 <fopen@plt+0x70>
401107: bf 50 40 40 00 mov $0x404050,%edi
40110c: ff e0 jmp *%rax
40110e: 66 90 xchg %ax,%ax
401110: c3 ret
401111: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
401118: 00 00 00 00
40111c: 0f 1f 40 00 nopl 0x0(%rax)
401120: be 50 40 40 00 mov $0x404050,%esi
401125: 48 81 ee 50 40 40 00 sub $0x404050,%rsi
40112c: 48 89 f0 mov %rsi,%rax
40112f: 48 c1 ee 3f shr $0x3f,%rsi
401133: 48 c1 f8 03 sar $0x3,%rax
401137: 48 01 c6 add %rax,%rsi
40113a: 48 d1 fe sar %rsi
40113d: 74 11 je 401150 <fopen@plt+0xb0>
40113f: b8 00 00 00 00 mov $0x0,%eax
401144: 48 85 c0 test %rax,%rax
401147: 74 07 je 401150 <fopen@plt+0xb0>
401149: bf 50 40 40 00 mov $0x404050,%edi
40114e: ff e0 jmp *%rax
401150: c3 ret
401151: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
401158: 00 00 00 00
40115c: 0f 1f 40 00 nopl 0x0(%rax)
401160: f3 0f 1e fa endbr64
401164: 80 3d ed 2e 00 00 00 cmpb $0x0,0x2eed(%rip) # 404058 <stdout@GLIBC_2.2.5+0x8>
40116b: 75 13 jne 401180 <fopen@plt+0xe0>
40116d: 55 push %rbp
40116e: 48 89 e5 mov %rsp,%rbp
401171: e8 7a ff ff ff call 4010f0 <fopen@plt+0x50>
401176: c6 05 db 2e 00 00 01 movb $0x1,0x2edb(%rip) # 404058 <stdout@GLIBC_2.2.5+0x8>
40117d: 5d pop %rbp
40117e: c3 ret
40117f: 90 nop
401180: c3 ret
401181: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
401188: 00 00 00 00
40118c: 0f 1f 40 00 nopl 0x0(%rax)
401190: f3 0f 1e fa endbr64
401194: eb 8a jmp 401120 <fopen@plt+0x80>
; win function
401196: 55 push %rbp
401197: 48 89 e5 mov %rsp,%rbp
40119a: 48 83 ec 10 sub $0x10,%rsp
40119e: 48 8d 05 63 0e 00 00 lea 0xe63(%rip),%rax # 402008 <fopen@plt+0xf68>
4011a5: 48 89 c7 mov %rax,%rdi
4011a8: e8 93 fe ff ff call 401040 <puts@plt>
4011ad: 48 8d 05 85 0e 00 00 lea 0xe85(%rip),%rax # 402039 <fopen@plt+0xf99>
4011b4: 48 89 c6 mov %rax,%rsi
4011b7: 48 8d 05 7d 0e 00 00 lea 0xe7d(%rip),%rax # 40203b <fopen@plt+0xf9b>
4011be: 48 89 c7 mov %rax,%rdi
4011c1: e8 da fe ff ff call 4010a0 <fopen@plt>
4011c6: 48 89 45 f8 mov %rax,-0x8(%rbp)
4011ca: 48 83 7d f8 00 cmpq $0x0,-0x8(%rbp)
4011cf: 75 11 jne 4011e2 <fopen@plt+0x142>
4011d1: 48 8d 05 6c 0e 00 00 lea 0xe6c(%rip),%rax # 402044 <fopen@plt+0xfa4>
4011d8: 48 89 c7 mov %rax,%rdi
4011db: e8 60 fe ff ff call 401040 <puts@plt>
4011e0: eb 3d jmp 40121f <fopen@plt+0x17f>
4011e2: 48 8d 05 73 0e 00 00 lea 0xe73(%rip),%rax # 40205c <fopen@plt+0xfbc>
4011e9: 48 89 c7 mov %rax,%rdi
4011ec: e8 4f fe ff ff call 401040 <puts@plt>
4011f1: eb 0b jmp 4011fe <fopen@plt+0x15e>
4011f3: 0f be 45 f7 movsbl -0x9(%rbp),%eax
4011f7: 89 c7 mov %eax,%edi
4011f9: e8 32 fe ff ff call 401030 <putchar@plt>
4011fe: 48 8b 45 f8 mov -0x8(%rbp),%rax
401202: 48 89 c7 mov %rax,%rdi
401205: e8 66 fe ff ff call 401070 <fgetc@plt>
40120a: 88 45 f7 mov %al,-0x9(%rbp)
40120d: 80 7d f7 ff cmpb $0xff,-0x9(%rbp)
401211: 75 e0 jne 4011f3 <fopen@plt+0x153>
401213: 48 8b 45 f8 mov -0x8(%rbp),%rax
401217: 48 89 c7 mov %rax,%rdi
40121a: e8 31 fe ff ff call 401050 <fclose@plt>
40121f: c9 leave
401220: c3 ret
401221: 55 push %rbp
401222: 48 89 e5 mov %rsp,%rbp
401225: 48 83 ec 40 sub $0x40,%rsp
401229: 48 8b 05 20 2e 00 00 mov 0x2e20(%rip),%rax # 404050 <stdout@GLIBC_2.2.5>
401230: b9 00 00 00 00 mov $0x0,%ecx
401235: ba 02 00 00 00 mov $0x2,%edx
40123a: be 00 00 00 00 mov $0x0,%esi
40123f: 48 89 c7 mov %rax,%rdi
401242: e8 49 fe ff ff call 401090 <setvbuf@plt>
401247: 48 8d 05 2a 0e 00 00 lea 0xe2a(%rip),%rax # 402078 <fopen@plt+0xfd8>
40124e: 48 89 c7 mov %rax,%rdi
401251: b8 00 00 00 00 mov $0x0,%eax
401256: e8 05 fe ff ff call 401060 <printf@plt>
; gets call
40125b: 48 8d 45 c0 lea -0x40(%rbp),%rax
40125f: 48 89 c7 mov %rax,%rdi
401262: b8 00 00 00 00 mov $0x0,%eax
401267: e8 14 fe ff ff call 401080 <gets@plt>
40126c: 90 nop
40126d: c9 leave
40126e: c3 ret
40126f: 55 push %rbp
401270: 48 89 e5 mov %rsp,%rbp
401273: b8 00 00 00 00 mov $0x0,%eax
401278: e8 a4 ff ff ff call 401221 <fopen@plt+0x181>
40127d: b8 00 00 00 00 mov $0x0,%eax
401282: 5d pop %rbp
401283: c3 ret
Disassembly of section .fini:
0000000000401284 <.fini>:
401284: 48 83 ec 08 sub $0x8,%rsp
401288: 48 83 c4 08 add $0x8,%rsp
40128c: c3 ret
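; Reading of the listing above (hedged, inferred from the disassembly): the
; function at 0x401221 reserves a 0x40-byte stack buffer and fills it with
; gets(), which does no bounds checking, so 0x40 bytes of padding plus 8 more
; for the saved rbp reach the saved return address; overwriting it with
; 0x401196 (the win function) presumably makes the binary open the flag file
; and print it byte by byte.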
|
code-help-tutor/CT-Coursework-3-Code-Generation
| 2,834
|
tests/riscv/parser/pseudoops.s
|
# RUN: ../../../riscv/parser.py %s | filecheck %s
nop:
nop
# CHECK: nop:
# CHECK-NEXT: nop
li:
li t0, 0
li t0, 12345
li t0, -67890
# CHECK: li:
# CHECK-NEXT: li t0, 0
# CHECK-NEXT: li t0, 12345
# CHECK-NEXT: li t0, -67890
mv:
mv t0, t0
# CHECK: mv:
# CHECK-NEXT: mv t0, t0
not:
not t0, t0
# CHECK: not:
# CHECK-NEXT: not t0, t0
neg:
neg t0, t0
# CHECK: neg:
# CHECK-NEXT: neg t0, t0
negw:
negw t0, t0
# CHECK: negw:
# CHECK-NEXT: negw t0, t0
seqz:
seqz t0, t0
# CHECK: seqz:
# CHECK-NEXT: seqz t0, t0
snez:
snez t0, t0
# CHECK: snez:
# CHECK-NEXT: snez t0, t0
sltz:
sltz t0, t0
# CHECK: sltz:
# CHECK-NEXT: sltz t0, t0
sgtz:
sgtz t0, t0
# CHECK: sgtz:
# CHECK-NEXT: sgtz t0, t0
beqz:
beqz t0, 0
beqz t0, -1
beqz t0, 1
beqz t0, beqz
# CHECK: beqz:
# CHECK-NEXT: beqz t0, 0
# CHECK-NEXT: beqz t0, -1
# CHECK-NEXT: beqz t0, 1
# CHECK-NEXT: beqz t0, beqz
bnez:
bnez t0, 0
bnez t0, -1
bnez t0, 1
bnez t0, bnez
# CHECK: bnez:
# CHECK-NEXT: bnez t0, 0
# CHECK-NEXT: bnez t0, -1
# CHECK-NEXT: bnez t0, 1
# CHECK-NEXT: bnez t0, bnez
blez:
blez t0, 0
blez t0, -1
blez t0, 1
blez t0, blez
# CHECK: blez:
# CHECK-NEXT: blez t0, 0
# CHECK-NEXT: blez t0, -1
# CHECK-NEXT: blez t0, 1
# CHECK-NEXT: blez t0, blez
bgez:
bgez t0, 0
bgez t0, -1
bgez t0, 1
bgez t0, bgez
# CHECK: bgez:
# CHECK-NEXT: bgez t0, 0
# CHECK-NEXT: bgez t0, -1
# CHECK-NEXT: bgez t0, 1
# CHECK-NEXT: bgez t0, bgez
bltz:
bltz t0, 0
bltz t0, -1
bltz t0, 1
bltz t0, bltz
# CHECK: bltz:
# CHECK-NEXT: bltz t0, 0
# CHECK-NEXT: bltz t0, -1
# CHECK-NEXT: bltz t0, 1
# CHECK-NEXT: bltz t0, bltz
bgtz:
bgtz t0, 0
bgtz t0, -1
bgtz t0, 1
bgtz t0, bgtz
# CHECK: bgtz:
# CHECK-NEXT: bgtz t0, 0
# CHECK-NEXT: bgtz t0, -1
# CHECK-NEXT: bgtz t0, 1
# CHECK-NEXT: bgtz t0, bgtz
bgt:
bgt t0, t1, 0
bgt t0, t1, -1
bgt t0, t1, 1
bgt t0, t1, bgt
# CHECK: bgt:
# CHECK-NEXT: bgt t0, t1, 0
# CHECK-NEXT: bgt t0, t1, -1
# CHECK-NEXT: bgt t0, t1, 1
# CHECK-NEXT: bgt t0, t1, bgt
ble:
ble t0, t1, 0
ble t0, t1, -1
ble t0, t1, 1
ble t0, t1, ble
# CHECK: ble:
# CHECK-NEXT: ble t0, t1, 0
# CHECK-NEXT: ble t0, t1, -1
# CHECK-NEXT: ble t0, t1, 1
# CHECK-NEXT: ble t0, t1, ble
bgtu:
bgtu t0, t1, 0
bgtu t0, t1, -1
bgtu t0, t1, 1
bgtu t0, t1, bgtu
# CHECK: bgtu:
# CHECK-NEXT: bgtu t0, t1, 0
# CHECK-NEXT: bgtu t0, t1, -1
# CHECK-NEXT: bgtu t0, t1, 1
# CHECK-NEXT: bgtu t0, t1, bgtu
bleu:
bleu t0, t1, 0
bleu t0, t1, -1
bleu t0, t1, 1
bleu t0, t1, bleu
# CHECK: bleu:
# CHECK-NEXT: bleu t0, t1, 0
# CHECK-NEXT: bleu t0, t1, -1
# CHECK-NEXT: bleu t0, t1, 1
# CHECK-NEXT: bleu t0, t1, bleu
ret:
ret
# CHECK: ret:
# CHECK-NEXT: ret
|
code-help-tutor/CT-Coursework-3-Code-Generation
| 2,482
|
tests/riscv/parser/registers.s
|
# RUN: ../../../riscv/parser.py %s | filecheck %s
registers:
# CHECK: registers:
jal x0, 0
# CHECK-NEXT: jal zero, 0
jal x1, 0
# CHECK-NEXT: jal ra, 0
jal x2, 0
# CHECK-NEXT: jal sp, 0
jal x3, 0
# CHECK-NEXT: jal gp, 0
jal x4, 0
# CHECK-NEXT: jal tp, 0
jal x5, 0
# CHECK-NEXT: jal t0, 0
jal x6, 0
# CHECK-NEXT: jal t1, 0
jal x7, 0
# CHECK-NEXT: jal t2, 0
jal x8, 0
# CHECK-NEXT: jal fp, 0
jal x9, 0
# CHECK-NEXT: jal s1, 0
jal x10, 0
# CHECK-NEXT: jal a0, 0
jal x11, 0
# CHECK-NEXT: jal a1, 0
jal x12, 0
# CHECK-NEXT: jal a2, 0
jal x13, 0
# CHECK-NEXT: jal a3, 0
jal x14, 0
# CHECK-NEXT: jal a4, 0
jal x15, 0
# CHECK-NEXT: jal a5, 0
jal x16, 0
# CHECK-NEXT: jal a6, 0
jal x17, 0
# CHECK-NEXT: jal a7, 0
jal x18, 0
# CHECK-NEXT: jal s2, 0
jal x19, 0
# CHECK-NEXT: jal s3, 0
jal x20, 0
# CHECK-NEXT: jal s4, 0
jal x21, 0
# CHECK-NEXT: jal s5, 0
jal x22, 0
# CHECK-NEXT: jal s6, 0
jal x23, 0
# CHECK-NEXT: jal s7, 0
jal x24, 0
# CHECK-NEXT: jal s8, 0
jal x25, 0
# CHECK-NEXT: jal s9, 0
jal x26, 0
# CHECK-NEXT: jal s10, 0
jal x27, 0
# CHECK-NEXT: jal s11, 0
jal x28, 0
# CHECK-NEXT: jal t3, 0
jal x29, 0
# CHECK-NEXT: jal t4, 0
jal x30, 0
# CHECK-NEXT: jal t5, 0
jal x31, 0
# CHECK-NEXT: jal t6, 0
abinames:
# CHECK-NEXT: abinames:
jal zero, 0
# CHECK-NEXT: jal zero, 0
jal ra, 0
# CHECK-NEXT: jal ra, 0
jal sp, 0
# CHECK-NEXT: jal sp, 0
jal gp, 0
# CHECK-NEXT: jal gp, 0
jal tp, 0
# CHECK-NEXT: jal tp, 0
jal t0, 0
# CHECK-NEXT: jal t0, 0
jal t1, 0
# CHECK-NEXT: jal t1, 0
jal t2, 0
# CHECK-NEXT: jal t2, 0
jal fp, 0
# CHECK-NEXT: jal fp, 0
jal s0, 0
# CHECK-NEXT: jal fp, 0
jal s1, 0
# CHECK-NEXT: jal s1, 0
jal a0, 0
# CHECK-NEXT: jal a0, 0
jal a1, 0
# CHECK-NEXT: jal a1, 0
jal a2, 0
# CHECK-NEXT: jal a2, 0
jal a3, 0
# CHECK-NEXT: jal a3, 0
jal a4, 0
# CHECK-NEXT: jal a4, 0
jal a5, 0
# CHECK-NEXT: jal a5, 0
jal a6, 0
# CHECK-NEXT: jal a6, 0
jal a7, 0
# CHECK-NEXT: jal a7, 0
jal s2, 0
# CHECK-NEXT: jal s2, 0
jal s3, 0
# CHECK-NEXT: jal s3, 0
jal s4, 0
# CHECK-NEXT: jal s4, 0
jal s5, 0
# CHECK-NEXT: jal s5, 0
jal s6, 0
# CHECK-NEXT: jal s6, 0
jal s7, 0
# CHECK-NEXT: jal s7, 0
jal s8, 0
# CHECK-NEXT: jal s8, 0
jal s9, 0
# CHECK-NEXT: jal s9, 0
jal s10, 0
# CHECK-NEXT: jal s10, 0
jal s11, 0
# CHECK-NEXT: jal s11, 0
jal t3, 0
# CHECK-NEXT: jal t3, 0
jal t4, 0
# CHECK-NEXT: jal t4, 0
jal t5, 0
# CHECK-NEXT: jal t5, 0
jal t6, 0
# CHECK-NEXT: jal t6, 0
|
code-help-tutor/CT-Coursework-3-Code-Generation
| 7,079
|
tests/riscv/parser/ops.s
|
# RUN: ../../../riscv/parser.py %s | filecheck %s
loadbyte:
lb t0, t2
lb t0, t2, 0
lb t0, t2, 12
lb t0, t2, -34
lb t0, 0(t2)
lb t0, 56(t2)
lb t0, -78(t2)
# CHECK: loadbyte:
# CHECK-NEXT: lb t0, 0(t2)
# CHECK-NEXT: lb t0, 0(t2)
# CHECK-NEXT: lb t0, 12(t2)
# CHECK-NEXT: lb t0, -34(t2)
# CHECK-NEXT: lb t0, 0(t2)
# CHECK-NEXT: lb t0, 56(t2)
# CHECK-NEXT: lb t0, -78(t2)
loadbyteunsigned:
lbu t0, t2
lbu t0, t2, 0
lbu t0, t2, 12
lbu t0, t2, -34
lbu t0, 0(t2)
lbu t0, 56(t2)
lbu t0, -78(t2)
# CHECK: loadbyteunsigned:
# CHECK-NEXT: lbu t0, 0(t2)
# CHECK-NEXT: lbu t0, 0(t2)
# CHECK-NEXT: lbu t0, 12(t2)
# CHECK-NEXT: lbu t0, -34(t2)
# CHECK-NEXT: lbu t0, 0(t2)
# CHECK-NEXT: lbu t0, 56(t2)
# CHECK-NEXT: lbu t0, -78(t2)
loadhalf:
lh t0, t2
lh t0, t2, 0
lh t0, t2, 12
lh t0, t2, -34
lh t0, 0(t2)
lh t0, 56(t2)
lh t0, -78(t2)
# CHECK: loadhalf:
# CHECK-NEXT: lh t0, 0(t2)
# CHECK-NEXT: lh t0, 0(t2)
# CHECK-NEXT: lh t0, 12(t2)
# CHECK-NEXT: lh t0, -34(t2)
# CHECK-NEXT: lh t0, 0(t2)
# CHECK-NEXT: lh t0, 56(t2)
# CHECK-NEXT: lh t0, -78(t2)
loadhalfunsigned:
lhu t0, t2
lhu t0, t2, 0
lhu t0, t2, 12
lhu t0, t2, -34
lhu t0, 0(t2)
lhu t0, 56(t2)
lhu t0, -78(t2)
# CHECK: loadhalfunsigned:
# CHECK-NEXT: lhu t0, 0(t2)
# CHECK-NEXT: lhu t0, 0(t2)
# CHECK-NEXT: lhu t0, 12(t2)
# CHECK-NEXT: lhu t0, -34(t2)
# CHECK-NEXT: lhu t0, 0(t2)
# CHECK-NEXT: lhu t0, 56(t2)
# CHECK-NEXT: lhu t0, -78(t2)
loadword:
lw t0, t2
lw t0, t2, 0
lw t0, t2, 12
lw t0, t2, -34
lw t0, 0(t2)
lw t0, 56(t2)
lw t0, -78(t2)
# CHECK: loadword:
# CHECK-NEXT: lw t0, 0(t2)
# CHECK-NEXT: lw t0, 0(t2)
# CHECK-NEXT: lw t0, 12(t2)
# CHECK-NEXT: lw t0, -34(t2)
# CHECK-NEXT: lw t0, 0(t2)
# CHECK-NEXT: lw t0, 56(t2)
# CHECK-NEXT: lw t0, -78(t2)
storebyte:
sb t0, t2
sb t0, t2, 0
sb t0, t2, 12
sb t0, t2, -34
sb t0, 0(t2)
sb t0, 56(t2)
sb t0, -78(t2)
# CHECK: storebyte:
# CHECK-NEXT: sb t0, 0(t2)
# CHECK-NEXT: sb t0, 0(t2)
# CHECK-NEXT: sb t0, 12(t2)
# CHECK-NEXT: sb t0, -34(t2)
# CHECK-NEXT: sb t0, 0(t2)
# CHECK-NEXT: sb t0, 56(t2)
# CHECK-NEXT: sb t0, -78(t2)
storehalf:
sh t0, t2
sh t0, t2, 0
sh t0, t2, 12
sh t0, t2, -34
sh t0, 0(t2)
sh t0, 56(t2)
sh t0, -78(t2)
# CHECK: storehalf:
# CHECK-NEXT: sh t0, 0(t2)
# CHECK-NEXT: sh t0, 0(t2)
# CHECK-NEXT: sh t0, 12(t2)
# CHECK-NEXT: sh t0, -34(t2)
# CHECK-NEXT: sh t0, 0(t2)
# CHECK-NEXT: sh t0, 56(t2)
# CHECK-NEXT: sh t0, -78(t2)
storeword:
sw t0, t2
sw t0, t2, 0
sw t0, t2, 12
sw t0, t2, -34
sw t0, 0(t2)
sw t0, 56(t2)
sw t0, -78(t2)
# CHECK: storeword:
# CHECK-NEXT: sw t0, 0(t2)
# CHECK-NEXT: sw t0, 0(t2)
# CHECK-NEXT: sw t0, 12(t2)
# CHECK-NEXT: sw t0, -34(t2)
# CHECK-NEXT: sw t0, 0(t2)
# CHECK-NEXT: sw t0, 56(t2)
# CHECK-NEXT: sw t0, -78(t2)
branches:
beq t0, t2, 0
beq t0, t2, 12
beq t0, t2, -34
beq t0, t2, branches
bne t0, t2, 0
bne t0, t2, 56
bne t0, t2, -78
bne t0, t2, branches
blt t0, t2, 0
blt t0, t2, 91
blt t0, t2, -23
blt t0, t2, branches
bge t0, t2, 0
bge t0, t2, 45
bge t0, t2, -67
bge t0, t2, branches
bltu t0, t2, 0
bltu t0, t2, 89
bltu t0, t2, -12
bltu t0, t2, branches
bgeu t0, t2, 0
bgeu t0, t2, 34
bgeu t0, t2, -56
bgeu t0, t2, branches
# CHECK: branches:
# CHECK-NEXT: beq t0, t2, 0
# CHECK-NEXT: beq t0, t2, 12
# CHECK-NEXT: beq t0, t2, -34
# CHECK-NEXT: beq t0, t2, branches
# CHECK-NEXT: bne t0, t2, 0
# CHECK-NEXT: bne t0, t2, 56
# CHECK-NEXT: bne t0, t2, -78
# CHECK-NEXT: bne t0, t2, branches
# CHECK-NEXT: blt t0, t2, 0
# CHECK-NEXT: blt t0, t2, 91
# CHECK-NEXT: blt t0, t2, -23
# CHECK-NEXT: blt t0, t2, branches
# CHECK-NEXT: bge t0, t2, 0
# CHECK-NEXT: bge t0, t2, 45
# CHECK-NEXT: bge t0, t2, -67
# CHECK-NEXT: bge t0, t2, branches
# CHECK-NEXT: bltu t0, t2, 0
# CHECK-NEXT: bltu t0, t2, 89
# CHECK-NEXT: bltu t0, t2, -12
# CHECK-NEXT: bltu t0, t2, branches
# CHECK-NEXT: bgeu t0, t2, 0
# CHECK-NEXT: bgeu t0, t2, 34
# CHECK-NEXT: bgeu t0, t2, -56
# CHECK-NEXT: bgeu t0, t2, branches
shifts:
sll t0, t1, t2
slli t0, t1, 0
slli t0, t1, 1
slli t0, t1, 2
srl t0, t1, t2
srli t0, t1, 0
srli t0, t1, 1
srli t0, t1, 2
sra t0, t1, t2
srai t0, t1, 0
srai t0, t1, 1
srai t0, t1, 2
# CHECK: shifts:
# CHECK-NEXT: sll t0, t1, t2
# CHECK-NEXT: slli t0, t1, 0
# CHECK-NEXT: slli t0, t1, 1
# CHECK-NEXT: slli t0, t1, 2
# CHECK-NEXT: srl t0, t1, t2
# CHECK-NEXT: srli t0, t1, 0
# CHECK-NEXT: srli t0, t1, 1
# CHECK-NEXT: srli t0, t1, 2
# CHECK-NEXT: sra t0, t1, t2
# CHECK-NEXT: srai t0, t1, 0
# CHECK-NEXT: srai t0, t1, 1
# CHECK-NEXT: srai t0, t1, 2
arithmetic:
add t0, t1, t2
addi t0, t1, 0
addi t0, t1, -12
addi t0, t1, -34
sub t0, t1, t2
lui t0, 12
lui t0, -34
auipc t0, 12
auipc t0, -34
# CHECK: arithmetic:
# CHECK-NEXT: add t0, t1, t2
# CHECK-NEXT: addi t0, t1, 0
# CHECK-NEXT: addi t0, t1, -12
# CHECK-NEXT: addi t0, t1, -34
# CHECK-NEXT: sub t0, t1, t2
# CHECK-NEXT: lui t0, 12
# CHECK-NEXT: lui t0, -34
# CHECK-NEXT: auipc t0, 12
# CHECK-NEXT: auipc t0, -34
logical:
xor t0, t1, t2
xori t0, t1, 0
xori t0, t1, -12
xori t0, t1, -34
or t0, t1, t2
ori t0, t1, 0
ori t0, t1, -12
ori t0, t1, -34
and t0, t1, t2
andi t0, t1, 0
andi t0, t1, -12
andi t0, t1, -34
# CHECK: logical:
# CHECK-NEXT: xor t0, t1, t2
# CHECK-NEXT: xori t0, t1, 0
# CHECK-NEXT: xori t0, t1, -12
# CHECK-NEXT: xori t0, t1, -34
# CHECK-NEXT: or t0, t1, t2
# CHECK-NEXT: ori t0, t1, 0
# CHECK-NEXT: ori t0, t1, -12
# CHECK-NEXT: ori t0, t1, -34
# CHECK-NEXT: and t0, t1, t2
# CHECK-NEXT: andi t0, t1, 0
# CHECK-NEXT: andi t0, t1, -12
# CHECK-NEXT: andi t0, t1, -34
compare:
slt t0, t1, t2
slti t0, t1, 0
slti t0, t1, -12
slti t0, t1, -34
sltu t0, t1, t2
sltiu t0, t1, 0
sltiu t0, t1, -12
sltiu t0, t1, -34
# CHECK: compare:
# CHECK-NEXT: slt t0, t1, t2
# CHECK-NEXT: slti t0, t1, 0
# CHECK-NEXT: slti t0, t1, -12
# CHECK-NEXT: slti t0, t1, -34
# CHECK-NEXT: sltu t0, t1, t2
# CHECK-NEXT: sltiu t0, t1, 0
# CHECK-NEXT: sltiu t0, t1, -12
# CHECK-NEXT: sltiu t0, t1, -34
jumpandlink:
jal t0, 0
jal t0, -12
jal t0, 32
jal t0, jumpandlink
jalr t0, t1, 0
jalr t0, t1, -12
jalr t0, t1, 32
jalr t0, t1, jumpandlink
# CHECK: jumpandlink:
# CHECK-NEXT: jal t0, 0
# CHECK-NEXT: jal t0, -12
# CHECK-NEXT: jal t0, 32
# CHECK-NEXT: jal t0, jumpandlink
# CHECK-NEXT: jalr t0, t1, 0
# CHECK-NEXT: jalr t0, t1, -12
# CHECK-NEXT: jalr t0, t1, 32
# CHECK-NEXT: jalr t0, t1, jumpandlink
multiplicative:
mul t0, t1, t2
mulh t0, t1, t2
mulhsu t0, t1, t2
mulhu t0, t1, t2
mulhu t0, t1, t2
div t0, t1, t2
divu t0, t1, t2
rem t0, t1, t2
remu t0, t1, t2
# CHECK: multiplicative:
# CHECK-NEXT: mul t0, t1, t2
# CHECK-NEXT: mulh t0, t1, t2
# CHECK-NEXT: mulhsu t0, t1, t2
# CHECK-NEXT: mulhu t0, t1, t2
# CHECK-NEXT: mulhu t0, t1, t2
# CHECK-NEXT: div t0, t1, t2
# CHECK-NEXT: divu t0, t1, t2
# CHECK-NEXT: rem t0, t1, t2
# CHECK-NEXT: remu t0, t1, t2
system:
ecall
ebreak
# CHECK: system:
# CHECK-NEXT: ecall
# CHECK-NEXT: ebreak
|
code-help-tutor/CT-Coursework-3-Code-Generation
| 1,126
|
tests/riscv/lexer/ops.s
|
# RUN: riscv-lexer %s | filecheck %s
add a0, a0, a0
# CHECK: SYMBOL:add
# CHECK-NEXT: SYMBOL:a0
# CHECK-NEXT: COMMA
# CHECK-NEXT: SYMBOL:a0
# CHECK-NEXT: COMMA
# CHECK-NEXT: SYMBOL:a0
# CHECK-NEXT: NEWLINE
add a0, a0, 1
# CHECK: SYMBOL:add
# CHECK-NEXT: SYMBOL:a0
# CHECK-NEXT: COMMA
# CHECK-NEXT: SYMBOL:a0
# CHECK-NEXT: COMMA
# CHECK-NEXT: INTEGER:1
# CHECK-NEXT: NEWLINE
add a0, a0, -1
# CHECK: SYMBOL:add
# CHECK-NEXT: SYMBOL:a0
# CHECK-NEXT: COMMA
# CHECK-NEXT: SYMBOL:a0
# CHECK-NEXT: COMMA
# CHECK-NEXT: MINUS
# CHECK-NEXT: INTEGER:1
# CHECK-NEXT: NEWLINE
lw t0, t2
# CHECK: SYMBOL:lw
# CHECK-NEXT: SYMBOL:t0
# CHECK-NEXT: COMMA
# CHECK-NEXT: SYMBOL:t2
# CHECK-NEXT: NEWLINE
lw t0, 0(t2)
# CHECK: SYMBOL:lw
# CHECK-NEXT: SYMBOL:t0
# CHECK-NEXT: COMMA
# CHECK-NEXT: INTEGER:0
# CHECK-NEXT: LPAREN
# CHECK-NEXT: SYMBOL:t2
# CHECK-NEXT: RPAREN
# CHECK-NEXT: NEWLINE
lw t0, 8(t2)
# CHECK: SYMBOL:lw
# CHECK-NEXT: SYMBOL:t0
# CHECK-NEXT: COMMA
# CHECK-NEXT: INTEGER:8
# CHECK-NEXT: LPAREN
# CHECK-NEXT: SYMBOL:t2
# CHECK-NEXT: RPAREN
# CHECK-NEXT: NEWLINE
|
CoderEx24/gyro
| 2,137
|
firmware/main.S
|
#define __SFR_OFFSET 0
#include <avr/io.h>
.equ GYRO_ADDR, 0b11010001
.equ GYRO_XL, 0x27
.equ GYRO_XH, 0x29
.equ GYRO_YL, 0x2A
.equ GYRO_YH, 0x2B
.equ GYRO_ZL, 0x2C
.equ GYRO_ZH, 0x2D
.global main
main:
sbi DDRB, 0
ldi r16, 1<<TXEN
out UCSRB, r16
ldi r16, 1<<UCSZ1 | 1<<UCSZ0 | 1<<URSEL
out UCSRC, r16
ldi r16, 0x33
out UBRRL, r16
ldi r16, 0
out TWSR, r16
ldi r16, 0x47
out TWBR, r16
ldi r16, 1<<TWEN
out TWCR, r16
loop:
ldi ZH, hi8(start_str)
ldi ZL, lo8(start_str)
rcall log_string
rcall i2c_start
ldi ZH, hi8(accessing_str)
ldi ZL, lo8(accessing_str)
rcall log_string
ldi r20, GYRO_ADDR
rcall i2c_write
ldi ZH, hi8(reading_str)
ldi ZL, lo8(reading_str)
rcall log_string
ldi r20, 1|(GYRO_YL<<1)
rcall i2c_write
rcall i2c_read
mov r20, r31
rcall log_letter
ldi r20, '\n'
rcall log_letter
rjmp loop
i2c_start:
ldi r31, (1<<TWINT)|(1<<TWSTA)|(1<<TWEN)
out TWCR, r31
i2c_start_wait:
in r31, TWCR
sbrs r31, TWINT
rjmp i2c_start_wait
ret
i2c_write: ; (r20)
out TWDR, r20
ldi r31, (1<<TWINT)|(1<<TWEN)
out TWCR, r31
i2c_write_wait:
in r31, TWCR
sbrs r31, TWINT
rjmp i2c_write_wait
ret
i2c_read: ; (out r31)
ldi r31, (1<<TWINT)|(1<<TWEN)
out TWCR, r31
i2c_read_wait:
in r31, TWCR
sbrs r31, TWINT
rjmp i2c_read_wait
in r31, TWDR
ret
i2c_stop:
ldi r31, (1<<TWINT)|(1<<TWSTO)|(1<<TWEN)
out TWCR, r31
ret
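; The i2c_* routines above all follow the usual AVR TWI handshake: write
; TWINT|TWEN (plus TWSTA or TWSTO for start/stop) to TWCR to trigger the bus
; action, then busy-wait until hardware sets TWINT again to signal completion.
; A hedged C sketch of the polling idiom, assuming <avr/io.h>:
;
;   TWCR = (1 << TWINT) | (1 << TWEN);    /* kick off the transfer */
;   while (!(TWCR & (1 << TWINT)))        /* spin until it completes */
;       ;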
log_letter: ; (r20)
log_letter_loop:
sbis UCSRA, UDRE
rjmp log_letter_loop
out UDR, r20
ret
log_string:
log_string_loop:
lpm r20, Z+
rcall log_letter
cpi r20, 0
brne log_string_loop
ret
delay:
ldi r25, 0xA0
out TCNT0, r25
ldi r25, 0x05
out TCCR0, r25
timer_overflow_loop:
in r25, TIFR
sbrs r25, TOV0
rjmp timer_overflow_loop
ldi r25, 0
out TCCR0, r25
ldi r25, 1<<TOV0
out TIFR, r25
ret
start_str: .asciz "Starting...\n"
accessing_str: .asciz "Addressing Slave...\n"
reading_str: .asciz "Reading...\n"
|
Cogware/CogwareCopilot
| 2,148
|
src/_arch/aarch64/cpu/boot.s
|
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
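// For reference: adrp materializes the 4 KiB-aligned page address of the
// symbol relative to the PC (hence the +/- 4 GiB reach), and the add with the
// :lo12: relocation supplies the low 12 bits, so
//
//   ADR_REL x0, __bss_start
//
// expands to
//
//   adrp x0, __bss_start
//   add  x0, x0, #:lo12:__bss_start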
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_REL x0, __bss_start
ADR_REL x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_prepare_rust
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Prepare the jump to Rust code.
.L_prepare_rust:
// Set the stack pointer.
ADR_REL x0, __boot_core_stack_end_exclusive
mov sp, x0
// Read the CPU's timer counter frequency and store it in ARCH_TIMER_COUNTER_FREQUENCY.
// Abort if the frequency read back as 0.
ADR_REL x1, ARCH_TIMER_COUNTER_FREQUENCY // provided by aarch64/time.rs
mrs x2, CNTFRQ_EL0
cmp x2, xzr
b.eq .L_parking_loop
str w2, [x1]
// Jump to Rust code.
b _start_rust
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
Cogware/CogwareCopilot
| 2,148
|
X1_JTAG_boot/src/_arch/aarch64/cpu/boot.s
|
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2023 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x1, MPIDR_EL1
and x1, x1, {CONST_CORE_ID_MASK}
ldr x2, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x1, x2
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_REL x0, __bss_start
ADR_REL x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_prepare_rust
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Prepare the jump to Rust code.
.L_prepare_rust:
// Set the stack pointer.
ADR_REL x0, __boot_core_stack_end_exclusive
mov sp, x0
// Read the CPU's timer counter frequency and store it in ARCH_TIMER_COUNTER_FREQUENCY.
// Abort if the frequency read back as 0.
ADR_REL x1, ARCH_TIMER_COUNTER_FREQUENCY // provided by aarch64/time.rs
mrs x2, CNTFRQ_EL0
cmp x2, xzr
b.eq .L_parking_loop
str w2, [x1]
// Jump to Rust code.
b _start_rust
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
coltonisgod234/PixelPulse-6502
| 1,106
|
asm/source.s
|
.segment "CODE"
; Subroutine to refresh display
refresh_display:
lda #%00000001 ; Load immediate value %00000001 into accumulator
sta $3024 ; Store accumulator value into memory address $3024
rts ; Return from subroutine
; Reset subroutine
rst:
lda #$00 ; Load immediate 0 into the accumulator
jmp loopy
loopy:
iny ; Increment Y register
cpy #$FF ; Compare Y to 255 (hexadecimal $FF)
bne loopy ; Branch back to loopy if Y is not equal to 255
sta $1000, Y ; Store the accumulator into memory address $1000 + Y
jmp refresh_display ; Jump to refresh_display subroutine
jmp rst ; Jump back to rst (unreachable: the jmp above always transfers control)
nmi:
lda #$00 ; Debug statement
rti
irq:
lda #$10 ; Debug statement
rti
.segment "VECTORS"
.word nmi ; Non-maskable interrupt vector (address $FFFA)
.word rst ; Reset interrupt vector (address $FFFC)
.word irq ; Interrupt request vector (address $FFFE)
|
Conless/remire
| 5,079
|
crates/kernel/src/link_app.S
|
.align 3
.section .data
.global _num_app
_num_app:
.quad 17
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_5_start
.quad app_6_start
.quad app_7_start
.quad app_8_start
.quad app_9_start
.quad app_10_start
.quad app_11_start
.quad app_12_start
.quad app_13_start
.quad app_14_start
.quad app_15_start
.quad app_16_start
.quad app_16_end
.global _app_names
_app_names:
.string "exit"
.string "fantastic_text"
.string "forkexec"
.string "forktest"
.string "forktest2"
.string "forktest_simple"
.string "forktree"
.string "hello_world"
.string "initproc"
.string "matrix"
.string "sleep"
.string "sleep_simple"
.string "stack_overflow"
.string "user_shell"
.string "usertests"
.string "usertests-simple"
.string "yield"
.section .data
.global app_0_start
.global app_0_end
.align 3
app_0_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/exit"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
.align 3
app_1_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/fantastic_text"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
.align 3
app_2_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/forkexec"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
.align 3
app_3_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/forktest"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
.align 3
app_4_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/forktest2"
app_4_end:
.section .data
.global app_5_start
.global app_5_end
.align 3
app_5_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/forktest_simple"
app_5_end:
.section .data
.global app_6_start
.global app_6_end
.align 3
app_6_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/forktree"
app_6_end:
.section .data
.global app_7_start
.global app_7_end
.align 3
app_7_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/hello_world"
app_7_end:
.section .data
.global app_8_start
.global app_8_end
.align 3
app_8_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/initproc"
app_8_end:
.section .data
.global app_9_start
.global app_9_end
.align 3
app_9_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/matrix"
app_9_end:
.section .data
.global app_10_start
.global app_10_end
.align 3
app_10_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/sleep"
app_10_end:
.section .data
.global app_11_start
.global app_11_end
.align 3
app_11_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/sleep_simple"
app_11_end:
.section .data
.global app_12_start
.global app_12_end
.align 3
app_12_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/stack_overflow"
app_12_end:
.section .data
.global app_13_start
.global app_13_end
.align 3
app_13_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/user_shell"
app_13_end:
.section .data
.global app_14_start
.global app_14_end
.align 3
app_14_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/usertests"
app_14_end:
.section .data
.global app_15_start
.global app_15_end
.align 3
app_15_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/usertests-simple"
app_15_end:
.section .data
.global app_16_start
.global app_16_end
.align 3
app_16_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/yield"
app_16_end:
.align 3
.section .data
.global _num_service
_num_service:
.quad 1
.quad service_0_start
.quad service_0_end
.global _service_names
_service_names:
.string "pm"
.section .data
.global service_0_start
.global service_0_end
.align 3
service_0_start:
.incbin "/home/conless/Desktop/study/os/sjtu-cs2952/remire/target/riscv64gc-unknown-none-elf/debug/pm"
service_0_end:
|
Conless/remire
| 1,495
|
crates/kernel/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
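# With .altmacro enabled, the % prefix evaluates n before substitution, so the
# ".set n, 5 / .rept 27 / SAVE_GP %n" loops below expand to
#   sd x5, 5*8(sp)
#   sd x6, 6*8(sp)
#   ...
#   sd x31, 31*8(sp)
# x0 is the hardwired zero, x1 (ra) and x3 (gp) are saved explicitly, x2 (sp)
# travels through sscratch, and x4 (tp) is not saved.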
# Global trap handler
#
# fn __alltraps(ctx: &mut TrapContext) -> !
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
# change sp to kernel stack
csrrw sp, sscratch, sp
# now sp points to *TrapContext of user space
# and sscratch points to user stack
sd ra, 1*8(sp)
sd gp, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp) # TrapContext::status
sd t1, 33*8(sp) # TrapContext::pc
csrr t2, sscratch
sd t2, 2*8(sp) # TrapContext::sp
ld t0, 34*8(sp) # TrapContext::kernel_satp
ld t1, 36*8(sp) # TrapContext::trap_handler
ld sp, 35*8(sp) # TrapContext::kernel_sp
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
# Restore from trap
#
# fn __restore(ctx: *mut TrapContext, satp: usize) -> !
__restore:
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore registers
ld ra, 1*8(sp)
ld gp, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
Conless/remire
| 1,368
|
bios/src/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
# Global trap handler
#
# fn __alltraps(ctx: &mut TrapContext) -> !
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
# change sp to machine stack
csrrw sp, mscratch, sp
# now sp points to *TrapContext
# and sscratch points to user stack
sd ra, 1*8(sp)
sd gp, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, mepc
csrr t1, satp
sd t0, 32*8(sp) # TrapContext::pc
sd t1, 33*8(sp) # TrapContext::satp
csrr t2, mscratch
sd t2, 2*8(sp) # TrapContext::sp
ld t1, 35*8(sp) # TrapContext::trap_handler
ld sp, 34*8(sp) # TrapContext::kernel_sp
# switch to machine mode
csrw satp, 0
sfence.vma
# jump to trap_handler
jr t1
# Restore from trap
#
# fn __restore(ctx: *mut TrapContext) -> !
__restore:
csrw mscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore mepc/satp
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw mepc, t0
csrw satp, t1
# restore registers
ld ra, 1*8(sp)
ld gp, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
mret
|
cppbear/RuMono
| 11,809
|
library/std/src/sys/pal/sgx/abi/entry.S
|
/* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:
.section ".note.x86_64-fortanix-unknown-sgx", "", @note
.align 4
.long 1f - 0f /* name length (not including padding) */
.long 3f - 2f /* desc length (not including padding) */
.long 1 /* type = NT_VERSION */
0: .asciz "toolchain-version" /* name */
1: .align 4
2: .long 1 /* desc - toolchain version number, 32-bit LE */
3: .align 4
.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/* RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24
.Lxsave_mxcsr:
.short 0x1fbf
/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */
/* When using this macro, don't forget to adjust the linker version script! */
.macro globvar name:req size:req
.global \name
.protected \name
.align \size
.size \name , \size
\name :
.org .+\size
.endm
/* The base address (relative to enclave start) of the heap area */
globvar HEAP_BASE 8
/* The heap size in bytes */
globvar HEAP_SIZE 8
/* Value of the RELA entry in the dynamic table */
globvar RELA 8
/* Value of the RELACOUNT entry in the dynamic table */
globvar RELACOUNT 8
/* The enclave size in bytes */
globvar ENCLAVE_SIZE 8
/* The base address (relative to enclave start) of the enclave configuration area */
globvar CFGDATA_BASE 8
/* Non-zero if debugging is enabled, zero otherwise */
globvar DEBUG 1
/* The base address (relative to enclave start) of the enclave text section */
globvar TEXT_BASE 8
/* The size in bytes of enclave text section */
globvar TEXT_SIZE 8
/* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_OFFSET 8
/* The size in bytes of enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_LEN 8
/* The base address (relative to enclave start) of the enclave .eh_frame section */
globvar EH_FRM_OFFSET 8
/* The size in bytes of enclave .eh_frame section */
globvar EH_FRM_LEN 8
.org .Lxsave_clear+512
.Lxsave_header:
.int 0, 0 /* XSTATE_BV */
.int 0, 0 /* XCOMP_BV */
.org .+48 /* reserved bits */
.data
.Laborted:
.byte 0
/* TCS local storage section */
.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags, 0x08 /* initialized by loader */
.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw, 0x0a
.equ tcsls_user_mxcsr, 0x0c
.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp, 0x28
.equ tcsls_user_retip, 0x30
.equ tcsls_user_rbp, 0x38
.equ tcsls_user_r12, 0x40
.equ tcsls_user_r13, 0x48
.equ tcsls_user_r14, 0x50
.equ tcsls_user_r15, 0x58
.equ tcsls_tls_ptr, 0x60
.equ tcsls_tcs_addr, 0x68
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
.ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
.abort
.endif
mov $(1<<tcsls_flag_secondary),%e\reg
and %gs:tcsls_flags,%\reg
.endm
/* We place the ELF entry point in a separate section so it can be removed by
elf2sgxs */
.section .text_no_sgx, "ax"
.Lelf_entry_error_msg:
.ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_entry_error_msg_end:
.global elf_entry
.type elf_entry,function
elf_entry:
/* print error message */
movq $2,%rdi /* write to stderr (fd 2) */
lea .Lelf_entry_error_msg(%rip),%rsi
movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
.Lelf_entry_call:
movq $1,%rax /* write() syscall */
syscall
test %rax,%rax
jle .Lelf_exit /* exit on error */
add %rax,%rsi
sub %rax,%rdx /* all chars written? */
jnz .Lelf_entry_call
.Lelf_exit:
movq $60,%rax /* exit() syscall */
movq $1,%rdi /* exit code 1 */
syscall
ud2 /* should not be reached */
/* end elf_entry */
/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
andq $~0x40400, (%rsp)
popfq
/* check for abort */
bt $0,.Laborted(%rip)
jc .Lreentry_panic
.endm
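/* For reference: the mask 0x40400 above is bit 10 (DF, 0x400) ORed with */
/* bit 18 (AC, 0x40000), so the andq with its complement clears exactly the */
/* two flags named in the comments while leaving the rest of RFLAGS intact. */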
.text
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
mov %rcx,%gs:tcsls_user_retip
mov %rsp,%gs:tcsls_user_rsp
mov %rbp,%gs:tcsls_user_rbp
mov %r12,%gs:tcsls_user_r12
mov %r13,%gs:tcsls_user_r13
mov %r14,%gs:tcsls_user_r14
mov %r15,%gs:tcsls_user_r15
mov %rbx,%gs:tcsls_tcs_addr
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
mov %rdx, %r10
mov $-1, %rax
mov $-1, %rdx
xrstor .Lxsave_clear(%rip)
lfence
mov %r10, %rdx
/* check if returning from usercall */
mov %gs:tcsls_last_rsp,%r11
test %r11,%r11
jnz .Lusercall_ret
/* setup stack */
mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
/* here. This is fixed below under "adjust stack". */
/* check for thread init */
bts $tcsls_flag_init_once,%gs:tcsls_flags
jc .Lskip_init
/* adjust stack */
lea IMAGE_BASE(%rip),%rax
add %rax,%rsp
mov %rsp,%gs:tcsls_tos
entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
mov %rdi,%rbx
mov %rsi,%r12
mov %rdx,%r13
mov %r8,%r14
mov %r9,%r15
load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
call tcs_init
/* reload caller-saved registers */
mov %rbx,%rdi
mov %r12,%rsi
mov %r13,%rdx
mov %r14,%r8
mov %r15,%r9
jmp .Lafter_init
.Lskip_init:
entry_sanitize_final
.Lafter_init:
/* call into main entry point */
load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
mov %rax,%rsi /* RSI = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
xor %rdi,%rdi /* RDI = normal exit */
.Lexit:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set later */
/* RCX overwritten by ENCLU */
/* RDX contains return value */
/* RSP set later */
/* RBP set later */
/* RDI contains exit mode */
/* RSI contains return value */
xor %r8,%r8
xor %r9,%r9
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
mov %rdx, %rcx /* save RDX */
mov $-1, %rax
mov %rax, %rdx
xrstor .Lxsave_clear(%rip)
mov %rcx, %rdx /* restore RDX */
/* clear flags */
pushq $0
popfq
/* restore user registers */
mov %gs:tcsls_user_r12,%r12
mov %gs:tcsls_user_r13,%r13
mov %gs:tcsls_user_r14,%r14
mov %gs:tcsls_user_r15,%r15
mov %gs:tcsls_user_retip,%rbx
mov %gs:tcsls_user_rsp,%rsp
mov %gs:tcsls_user_rbp,%rbp
fldcw %gs:tcsls_user_fcw
ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
mov $0x4,%eax /* EEXIT */
enclu
/* end sgx_entry */
.Lreentry_panic:
orq $8,%rsp
jmp abort_reentry
/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
.global usercall
usercall:
test %rcx,%rcx /* check `abort` function argument */
jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
testb $0xff,DEBUG(%rip)
jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rbx
sub $8, %rsp
fstcw 4(%rsp)
stmxcsr (%rsp)
movq %rsp,%gs:tcsls_last_rsp
.Lusercall_noreturn:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set by sgx_exit */
/* RCX overwritten by ENCLU */
/* RDX contains parameter */
/* RSP set by sgx_exit */
/* RBP set by sgx_exit */
/* RDI contains parameter */
/* RSI contains parameter */
/* R8 contains parameter */
/* R9 contains parameter */
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
/* extended registers/flags cleared by sgx_exit */
/* exit */
jmp .Lsgx_exit
.Lusercall_ret:
movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
ldmxcsr (%rsp)
fldcw 4(%rsp)
add $8, %rsp
entry_sanitize_final
pop %rbx
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
/* return */
mov %rsi,%rax /* RAX = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
pop %r11
lfence
jmp *%r11
/*
The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;
// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);
// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/
.global get_tcs_addr
get_tcs_addr:
mov %gs:tcsls_tcs_addr,%rax
pop %r11
lfence
jmp *%r11
.global get_tls_ptr
get_tls_ptr:
mov %gs:tcsls_tls_ptr,%rax
pop %r11
lfence
jmp *%r11
.global set_tls_ptr
set_tls_ptr:
mov %rdi,%gs:tcsls_tls_ptr
pop %r11
lfence
jmp *%r11
.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
xor %rax,%rax
xchg %gs:tcsls_debug_panic_buf_ptr,%rax
pop %r11
lfence
jmp *%r11
|
CrazyDave999/Compiler-2024
| 4,391
|
builtin1.s
|
.text
.globl print
.p2align 1
print:
mv a1, a0
lui a0, %hi(.L.str)
addi a0, a0, %lo(.L.str)
tail printf
.Lfunc_end0:
.globl println
.p2align 1
println:
mv a1, a0
lui a0, %hi(.L.str.1)
addi a0, a0, %lo(.L.str.1)
tail printf
.Lfunc_end1:
.globl printInt
.p2align 1
printInt:
mv a1, a0
lui a0, %hi(.L.str.2)
addi a0, a0, %lo(.L.str.2)
tail printf
.Lfunc_end2:
.globl printlnInt
.p2align 1
printlnInt:
mv a1, a0
lui a0, %hi(.L.str.3)
addi a0, a0, %lo(.L.str.3)
tail printf
.Lfunc_end3:
.globl getString
.p2align 1
getString:
addi sp, sp, -16
sw ra, 12(sp)
lui a0, 1
call malloc
mv a1, a0
sw a1, 8(sp)
lui a0, %hi(.L.str)
addi a0, a0, %lo(.L.str)
call scanf
lw a0, 8(sp)
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end4:
.globl getInt
.p2align 1
getInt:
addi sp, sp, -16
sw ra, 12(sp)
lui a0, %hi(.L.str.2)
addi a0, a0, %lo(.L.str.2)
addi a1, sp, 8
call scanf
lw a0, 8(sp)
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end5:
.globl toString
.p2align 1
toString:
addi sp, sp, -16
sw ra, 12(sp)
sw a0, 4(sp)
li a0, 64
call malloc
lw a2, 4(sp)
sw a0, 8(sp)
lui a1, %hi(.L.str.2)
addi a1, a1, %lo(.L.str.2)
call sprintf
lw a0, 8(sp)
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end6:
.globl CrazyDave..boolToString
.p2align 1
CrazyDave..boolToString:
addi sp, sp, -16
lui a1, %hi(.L.str.5)
addi a1, a1, %lo(.L.str.5)
sw a1, 8(sp)
lui a1, %hi(.L.str.4)
addi a2, a1, %lo(.L.str.4)
li a1, 0
sw a2, 12(sp)
bne a0, a1, .LBB7_2
lw a0, 8(sp)
sw a0, 12(sp)
.LBB7_2:
lw a0, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end7:
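# Array layout: AllocArray allocates 4 + 4*n bytes, stores the element count n
# in the first word, and returns a pointer just past it, so GetArraySize can
# read the count back from offset -4.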
.globl CrazyDave..AllocArray
.p2align 1
CrazyDave..AllocArray:
addi sp, sp, -16
sw ra, 12(sp)
sw a0, 8(sp)
slli a0, a0, 2
addi a0, a0, 4
call malloc
lw a1, 8(sp)
sw a1, 0(a0)
addi a0, a0, 4
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end8:
.globl CrazyDave..GetArraySize
.p2align 1
CrazyDave..GetArraySize:
lw a0, -4(a0)
ret
.Lfunc_end9:
.globl string.length
.p2align 1
string.length:
tail strlen
.Lfunc_end10:
.globl string.substring
.p2align 1
string.substring:
addi sp, sp, -32
sw ra, 28(sp)
sw a1, 12(sp)
sw a0, 16(sp)
sub a0, a2, a1
sw a0, 20(sp)
addi a0, a0, 2
call malloc
lw a3, 12(sp)
lw a1, 16(sp)
lw a2, 20(sp)
sw a0, 24(sp)
add a1, a1, a3
call memcpy
lw a1, 20(sp)
lw a0, 24(sp)
add a2, a0, a1
li a1, 0
sb a1, 0(a2)
lw ra, 28(sp)
addi sp, sp, 32
ret
.Lfunc_end11:
.globl string.parseInt
.p2align 1
string.parseInt:
addi sp, sp, -16
sw ra, 12(sp)
lui a1, %hi(.L.str.2)
addi a1, a1, %lo(.L.str.2)
addi a2, sp, 8
call sscanf
lw a0, 8(sp)
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end12:
.globl string.ord
.p2align 1
string.ord:
add a0, a0, a1
lbu a0, 0(a0)
ret
.Lfunc_end13:
.globl string.add
.p2align 1
string.add:
addi sp, sp, -32
sw ra, 28(sp)
sw a1, 16(sp)
sw a0, 4(sp)
call strlen
mv a1, a0
lw a0, 16(sp)
sw a1, 8(sp)
call strlen
lw a1, 8(sp)
sw a0, 12(sp)
add a0, a0, a1
sw a0, 20(sp)
addi a0, a0, 1
call malloc
lw a1, 4(sp)
lw a2, 8(sp)
sw a0, 24(sp)
call memcpy
lw a3, 8(sp)
lw a2, 12(sp)
lw a1, 16(sp)
lw a0, 24(sp)
add a0, a0, a3
addi a2, a2, 1
call memcpy
lw a1, 20(sp)
lw a0, 24(sp)
add a2, a0, a1
li a1, 0
sb a1, 0(a2)
lw ra, 28(sp)
addi sp, sp, 32
ret
.Lfunc_end14:
.globl string.eq
.p2align 1
string.eq:
addi sp, sp, -16
sw ra, 12(sp)
call strcmp
seqz a0, a0
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end15:
.globl string.ne
.p2align 1
string.ne:
addi sp, sp, -16
sw ra, 12(sp)
call strcmp
snez a0, a0
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end16:
.globl string.lt
.p2align 1
string.lt:
addi sp, sp, -16
sw ra, 12(sp)
call strcmp
srli a0, a0, 31
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end17:
.globl string.le
.p2align 1
string.le:
addi sp, sp, -16
sw ra, 12(sp)
call strcmp
slti a0, a0, 1
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end18:
.globl string.gt
.p2align 1
string.gt:
addi sp, sp, -16
sw ra, 12(sp)
call strcmp
mv a1, a0
li a0, 0
slt a0, a0, a1
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end19:
.globl string.ge
.p2align 1
string.ge:
addi sp, sp, -16
sw ra, 12(sp)
call strcmp
not a0, a0
srli a0, a0, 31
lw ra, 12(sp)
addi sp, sp, 16
ret
.Lfunc_end20:
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s"
.L.str.1:
.asciz "%s\n"
.L.str.2:
.asciz "%d"
.L.str.3:
.asciz "%d\n"
.L.str.4:
.asciz "true"
.L.str.5:
.asciz "false"
|
Crispigt/arm-SME-QKV
| 24,159
|
QKV-code/sve_asm.s
|
.arch armv9.2-a+crc+sme2
.file "naive_mm_realdata.c"
.text
.align 2
.p2align 5,,15
.global softmax_naive
.type softmax_naive, %function
softmax_naive:
.LFB22:
.cfi_startproc
cmp w1, 0
ble .L18
stp x29, x30, [sp, -128]!
.cfi_def_cfa_offset 128
.cfi_offset 29, -128
.cfi_offset 30, -120
mov x29, sp
stp x21, x22, [sp, 32]
.cfi_offset 21, -96
.cfi_offset 22, -88
sub w21, w2, #2
mov x22, x0
add x0, x0, 8
stp x23, x24, [sp, 48]
.cfi_offset 23, -80
.cfi_offset 24, -72
sbfiz x23, x2, 2, 32
mov w24, w1
add x21, x0, w21, uxtw 2
mov w1, 58350
mov w0, 58350
stp x19, x20, [sp, 16]
.cfi_offset 19, -112
.cfi_offset 20, -104
add x20, x22, x23
movk w1, 0x6c, lsl 16
stp x25, x26, [sp, 64]
.cfi_offset 25, -64
.cfi_offset 26, -56
mov w25, w2
movk w0, 0x806c, lsl 16
mov w26, 0
stp d11, d12, [sp, 80]
.cfi_offset 75, -48
.cfi_offset 76, -40
fmov s11, w0
fmov s12, 1.0e+0
stp d13, d14, [sp, 96]
.cfi_offset 77, -32
.cfi_offset 78, -24
fmov s13, w1
str d15, [sp, 112]
.cfi_offset 79, -16
.p2align 5,,15
.L10:
ldr s15, [x22]
cmp w25, 1
ble .L3
add x0, x22, 4
.p2align 5,,15
.L4:
ldr s31, [x0], 4
fcmpe s31, s15
fcsel s15, s31, s15, gt
cmp x0, x21
bne .L4
.L3:
cmp w25, 0
ble .L1
movi v14.2s, #0
mov x19, x22
.p2align 5,,15
.L5:
ldr s0, [x19], 4
fsub s0, s0, s15
bl expf
fadd s14, s14, s0
cmp x19, x20
bne .L5
fcmpe s14, s13
bgt .L6
fcmpe s14, s11
bmi .L6
movi v14.2s, #0
.p2align 5,,15
.L7:
mov x19, x22
.p2align 5,,15
.L9:
ldr s0, [x19]
fsub s0, s0, s15
bl expf
fmul s0, s0, s14
str s0, [x19], 4
cmp x20, x19
bne .L9
add w26, w26, 1
add x22, x22, x23
add x20, x20, x23
add x21, x21, x23
cmp w24, w26
bne .L10
.L1:
ldr d15, [sp, 112]
ldp x19, x20, [sp, 16]
ldp x21, x22, [sp, 32]
ldp x23, x24, [sp, 48]
ldp x25, x26, [sp, 64]
ldp d11, d12, [sp, 80]
ldp d13, d14, [sp, 96]
ldp x29, x30, [sp], 128
.cfi_remember_state
.cfi_restore 30
.cfi_restore 29
.cfi_restore 25
.cfi_restore 26
.cfi_restore 23
.cfi_restore 24
.cfi_restore 21
.cfi_restore 22
.cfi_restore 19
.cfi_restore 20
.cfi_restore 79
.cfi_restore 77
.cfi_restore 78
.cfi_restore 75
.cfi_restore 76
.cfi_def_cfa_offset 0
ret
.p2align 2,,3
.L6:
.cfi_restore_state
fdiv s14, s12, s14
b .L7
.L18:
.cfi_def_cfa_offset 0
.cfi_restore 19
.cfi_restore 20
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 29
.cfi_restore 30
.cfi_restore 75
.cfi_restore 76
.cfi_restore 77
.cfi_restore 78
.cfi_restore 79
ret
.cfi_endproc
.LFE22:
.size softmax_naive, .-softmax_naive
.align 2
.p2align 5,,15
.global matrix_multiply_naive
.type matrix_multiply_naive, %function
matrix_multiply_naive:
.LFB23:
.cfi_startproc
mov w13, w3
mov x12, x0
mov x14, x1
mov w10, w4
cmp w3, 0
ble .L22
cmp w5, 0
ble .L22
sxtw x7, w5
mov x3, x2
ubfiz x5, x5, 2, 32
sxtw x15, w4
mov w9, 0
mov w11, 0
.p2align 5,,15
.L25:
add x4, x15, w9, sxtw
mov x6, x14
add x8, x12, w9, sxtw 2
mov x2, 0
add x4, x12, x4, lsl 2
.L27:
str wzr, [x3, x2, lsl 2]
cmp w10, 0
bgt .L26
add x2, x2, 1
add x6, x6, 4
cmp x7, x2
bne .L27
.p2align 5,,15
.L28:
add w11, w11, 1
add x3, x3, x5
add w9, w9, w10
cmp w13, w11
bne .L25
.L22:
ret
.p2align 2,,3
.L34:
add x2, x2, 1
add x6, x6, 4
cmp x7, x2
beq .L28
str wzr, [x3, x2, lsl 2]
.L26:
movi v31.2s, #0
mov x1, x6
mov x0, x8
.p2align 5,,15
.L29:
ldr s29, [x1]
add x1, x1, x5
ldr s30, [x0], 4
fmadd s31, s30, s29, s31
str s31, [x3, x2, lsl 2]
cmp x4, x0
bne .L29
b .L34
.cfi_endproc
.LFE23:
.size matrix_multiply_naive, .-matrix_multiply_naive
.section .rodata.str1.8,"aMS",@progbits,1
.align 3
.LC0:
.string "r"
.align 3
.LC1:
.string "Error opening file"
.align 3
.LC2:
.string "Failed to open: %s\n"
.align 3
.LC3:
.string "%d %d"
.align 3
.LC4:
.string "Error reading dimensions from %s\n"
.align 3
.LC5:
.string "Invalid dimensions (%d x %d) read from %s\n"
.align 3
.LC6:
.string "Error allocating memory for matrix"
.align 3
.LC7:
.string "%f"
.align 3
.LC8:
.string "Error reading matrix data element %d from %s\n"
.text
.align 2
.p2align 5,,15
.global load_matrix_data
.type load_matrix_data, %function
load_matrix_data:
.LFB24:
.cfi_startproc
stp x29, x30, [sp, -80]!
.cfi_def_cfa_offset 80
.cfi_offset 29, -80
.cfi_offset 30, -72
mov x29, sp
stp x19, x20, [sp, 16]
.cfi_offset 19, -64
.cfi_offset 20, -56
mov x20, x1
mov x19, x2
adrp x1, .LC0
add x1, x1, :lo12:.LC0
stp x23, x24, [sp, 48]
str x25, [sp, 64]
.cfi_offset 23, -32
.cfi_offset 24, -24
.cfi_offset 25, -16
mov x25, x0
bl fopen
cbz x0, .L46
adrp x1, .LC3
mov x3, x19
add x1, x1, :lo12:.LC3
mov x2, x20
stp x21, x22, [sp, 32]
.cfi_offset 22, -40
.cfi_offset 21, -48
mov x21, x0
bl __isoc99_fscanf
cmp w0, 2
bne .L47
ldr w3, [x19]
ldr w2, [x20]
mul w23, w2, w3
cmp w23, 0
ble .L48
ubfiz x0, x23, 2, 32
bl malloc
mov x24, x0
cbz x0, .L49
adrp x22, .LC7
mov x19, x0
add x22, x22, :lo12:.LC7
mov w20, 0
.p2align 5,,15
.L43:
mov x2, x19
mov x1, x22
mov x0, x21
add x19, x19, 4
bl __isoc99_fscanf
cmp w0, 1
bne .L50
add w20, w20, 1
cmp w23, w20
bne .L43
mov x0, x21
bl fclose
ldr x25, [sp, 64]
mov x0, x24
ldp x21, x22, [sp, 32]
.cfi_remember_state
.cfi_restore 22
.cfi_restore 21
ldp x19, x20, [sp, 16]
ldp x23, x24, [sp, 48]
ldp x29, x30, [sp], 80
.cfi_restore 30
.cfi_restore 29
.cfi_restore 25
.cfi_restore 23
.cfi_restore 24
.cfi_restore 19
.cfi_restore 20
.cfi_def_cfa_offset 0
ret
.p2align 2,,3
.L50:
.cfi_restore_state
adrp x0, stderr
mov x3, x25
mov w2, w20
adrp x1, .LC8
ldr x0, [x0, #:lo12:stderr]
add x1, x1, :lo12:.LC8
bl fprintf
mov x0, x24
bl free
mov x0, x21
bl fclose
ldp x21, x22, [sp, 32]
.cfi_restore 22
.cfi_restore 21
mov x24, 0
.L51:
ldr x25, [sp, 64]
mov x0, x24
ldp x19, x20, [sp, 16]
ldp x23, x24, [sp, 48]
ldp x29, x30, [sp], 80
.cfi_restore 30
.cfi_restore 29
.cfi_restore 25
.cfi_restore 23
.cfi_restore 24
.cfi_restore 19
.cfi_restore 20
.cfi_def_cfa_offset 0
ret
.p2align 2,,3
.L47:
.cfi_def_cfa_offset 80
.cfi_offset 19, -64
.cfi_offset 20, -56
.cfi_offset 21, -48
.cfi_offset 22, -40
.cfi_offset 23, -32
.cfi_offset 24, -24
.cfi_offset 25, -16
.cfi_offset 29, -80
.cfi_offset 30, -72
adrp x0, stderr
mov x2, x25
adrp x1, .LC4
add x1, x1, :lo12:.LC4
ldr x0, [x0, #:lo12:stderr]
mov x24, 0
bl fprintf
mov x0, x21
bl fclose
ldp x21, x22, [sp, 32]
.cfi_remember_state
.cfi_restore 22
.cfi_restore 21
b .L51
.p2align 2,,3
.L48:
.cfi_restore_state
adrp x0, stderr
mov x4, x25
adrp x1, .LC5
add x1, x1, :lo12:.LC5
ldr x0, [x0, #:lo12:stderr]
mov x24, 0
bl fprintf
mov x0, x21
bl fclose
ldp x21, x22, [sp, 32]
.cfi_restore 22
.cfi_restore 21
b .L51
.p2align 2,,3
.L46:
adrp x0, .LC1
add x0, x0, :lo12:.LC1
bl perror
mov x24, 0
adrp x0, stderr
mov x2, x25
adrp x1, .LC2
add x1, x1, :lo12:.LC2
ldr x0, [x0, #:lo12:stderr]
bl fprintf
b .L51
.L49:
.cfi_offset 21, -48
.cfi_offset 22, -40
adrp x0, .LC6
add x0, x0, :lo12:.LC6
bl perror
mov x24, 0
mov x0, x21
bl fclose
ldp x21, x22, [sp, 32]
.cfi_restore 22
.cfi_restore 21
b .L51
.cfi_endproc
.LFE24:
.size load_matrix_data, .-load_matrix_data
.section .rodata.str1.8
.align 3
.LC9:
.string "Invalid input to transpose function\n"
.align 3
.LC10:
.string "Error allocating memory for transposed matrix"
.text
.align 2
.p2align 5,,15
.global transpose
.type transpose, %function
transpose:
.LFB25:
.cfi_startproc
stp x29, x30, [sp, -64]!
.cfi_def_cfa_offset 64
.cfi_offset 29, -64
.cfi_offset 30, -56
cmp w1, 0
ccmp w2, 0, 4, gt
mov x29, sp
ccmp x0, 0, 4, gt
beq .L77
stp x19, x20, [sp, 16]
.cfi_offset 20, -40
.cfi_offset 19, -48
mov w20, w2
sxtw x19, w1
stp x23, x24, [sp, 48]
.cfi_offset 24, -8
.cfi_offset 23, -16
ubfiz x23, x20, 2, 32
sxtw x24, w2
stp x21, x22, [sp, 32]
.cfi_offset 22, -24
.cfi_offset 21, -32
mov x22, x0
mul x0, x19, x23
mov w21, w1
bl malloc
cbz x0, .L55
mov w6, 12
mov x11, x22
mov x13, x0
ubfiz x8, x21, 4, 32
umull x6, w21, w6
ubfiz x17, x21, 2, 32
ubfiz x7, x21, 3, 32
and w1, w20, -4
lsr w16, w20, 2
mov x12, 0
mov x10, 0
.p2align 5,,15
.L56:
mov w15, w10
mov w14, w12
cmp w20, 3
ble .L65
mov w9, w1
.L60:
add x5, x11, w16, uxtw 4
mov x4, x13
mov x3, x11
.p2align 5,,15
.L63:
ldp s30, s31, [x3, 8]
ldp s28, s29, [x3], 16
str s28, [x4]
str s29, [x4, x19, lsl 2]
str s30, [x4, x7]
str s31, [x4, x6]
add x4, x4, x8
cmp x5, x3
bne .L63
mov w3, w1
cmp w20, w1
beq .L78
.L58:
sub w2, w20, w9
cmp w2, 1
beq .L61
add x4, x12, w9, uxtw
umaddl x9, w19, w9, x10
add x5, x22, x4, lsl 2
ldr s30, [x22, x4, lsl 2]
add x4, x0, x9, lsl 2
ldr s31, [x5, 4]
str s30, [x0, x9, lsl 2]
str s31, [x4, x17]
tbz x2, 0, .L62
and w2, w2, -2
add w3, w3, w2
.L61:
add w14, w3, w14
madd w3, w21, w3, w15
ldr s31, [x22, x14, lsl 2]
str s31, [x0, x3, lsl 2]
.L62:
add x10, x10, 1
add x11, x11, x23
add x13, x13, 4
add x12, x12, x24
cmp w21, w10
bgt .L56
.L76:
ldp x19, x20, [sp, 16]
.cfi_remember_state
.cfi_restore 20
.cfi_restore 19
ldp x21, x22, [sp, 32]
.cfi_restore 22
.cfi_restore 21
ldp x23, x24, [sp, 48]
.cfi_restore 24
.cfi_restore 23
ldp x29, x30, [sp], 64
.cfi_restore 30
.cfi_restore 29
.cfi_def_cfa_offset 0
ret
.p2align 2,,3
.L78:
.cfi_restore_state
add x10, x10, 1
add x11, x11, x23
add x13, x13, 4
add x12, x12, x24
cmp w21, w10
ble .L76
mov w15, w10
mov w14, w12
b .L60
.p2align 2,,3
.L65:
mov w9, 0
mov w3, 0
b .L58
.L77:
.cfi_restore 19
.cfi_restore 20
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
adrp x1, stderr
adrp x0, .LC9
mov x2, 36
add x0, x0, :lo12:.LC9
ldr x3, [x1, #:lo12:stderr]
mov x1, 1
bl fwrite
mov x0, 0
.L79:
ldp x29, x30, [sp], 64
.cfi_restore 30
.cfi_restore 29
.cfi_def_cfa_offset 0
ret
.L55:
.cfi_def_cfa_offset 64
.cfi_offset 19, -48
.cfi_offset 20, -40
.cfi_offset 21, -32
.cfi_offset 22, -24
.cfi_offset 23, -16
.cfi_offset 24, -8
.cfi_offset 29, -64
.cfi_offset 30, -56
adrp x0, .LC10
add x0, x0, :lo12:.LC10
bl perror
mov x0, 0
ldp x19, x20, [sp, 16]
.cfi_restore 20
.cfi_restore 19
ldp x21, x22, [sp, 32]
.cfi_restore 22
.cfi_restore 21
ldp x23, x24, [sp, 48]
.cfi_restore 24
.cfi_restore 23
b .L79
.cfi_endproc
.LFE25:
.size transpose, .-transpose
.section .rodata.str1.8
.align 3
.LC11:
.string "Matrix %s (%dx%d):\n"
.align 3
.LC12:
.string " (NULL)"
.align 3
.LC13:
.string "%f "
.align 3
.LC14:
.string "----"
.text
.align 2
.p2align 5,,15
.global print_matrix
.type print_matrix, %function
print_matrix:
.LFB26:
.cfi_startproc
stp x29, x30, [sp, -96]!
.cfi_def_cfa_offset 96
.cfi_offset 29, -96
.cfi_offset 30, -88
mov x29, sp
stp x23, x24, [sp, 48]
.cfi_offset 23, -48
.cfi_offset 24, -40
mov x24, x1
mov x1, x0
adrp x0, .LC11
add x0, x0, :lo12:.LC11
stp x25, x26, [sp, 64]
.cfi_offset 25, -32
.cfi_offset 26, -24
mov w26, w2
mov w25, w3
bl printf
cbz x24, .L81
cmp w26, 0
ble .L82
mov w23, 0
stp x21, x22, [sp, 32]
.cfi_offset 22, -56
.cfi_offset 21, -64
mov w22, 0
.L83:
mov w0, 10
cmp w25, 0
bgt .L93
add w23, w23, 1
bl putchar
add w22, w22, w25
cmp w26, w23
bne .L83
ldp x21, x22, [sp, 32]
.cfi_restore 22
.cfi_restore 21
.L82:
adrp x0, .LC14
ldp x23, x24, [sp, 48]
add x0, x0, :lo12:.LC14
ldp x25, x26, [sp, 64]
ldp x29, x30, [sp], 96
.cfi_restore 30
.cfi_restore 29
.cfi_restore 25
.cfi_restore 26
.cfi_restore 23
.cfi_restore 24
.cfi_def_cfa_offset 0
b puts
.p2align 2,,3
.L93:
.cfi_def_cfa_offset 96
.cfi_offset 21, -64
.cfi_offset 22, -56
.cfi_offset 23, -48
.cfi_offset 24, -40
.cfi_offset 25, -32
.cfi_offset 26, -24
.cfi_offset 29, -96
.cfi_offset 30, -88
adrp x21, .LC13
add x21, x21, :lo12:.LC13
str x27, [sp, 80]
.cfi_offset 27, -16
sxtw x27, w25
stp x19, x20, [sp, 16]
.cfi_offset 20, -72
.cfi_offset 19, -80
.p2align 5,,15
.L85:
add x20, x27, w22, sxtw
add x19, x24, w22, uxtw 2
add x20, x24, w20, uxtw 2
.p2align 5,,15
.L84:
ldr s0, [x19], 4
mov x0, x21
fcvt d0, s0
bl printf
cmp x20, x19
bne .L84
mov w0, 10
add w23, w23, 1
bl putchar
add w22, w22, w25
cmp w26, w23
bne .L85
ldr x27, [sp, 80]
.cfi_restore 27
adrp x0, .LC14
ldp x19, x20, [sp, 16]
.cfi_restore 20
.cfi_restore 19
add x0, x0, :lo12:.LC14
ldp x21, x22, [sp, 32]
.cfi_restore 22
.cfi_restore 21
ldp x23, x24, [sp, 48]
ldp x25, x26, [sp, 64]
ldp x29, x30, [sp], 96
.cfi_restore 30
.cfi_restore 29
.cfi_restore 25
.cfi_restore 26
.cfi_restore 23
.cfi_restore 24
.cfi_def_cfa_offset 0
b puts
.L81:
.cfi_def_cfa_offset 96
.cfi_offset 23, -48
.cfi_offset 24, -40
.cfi_offset 25, -32
.cfi_offset 26, -24
.cfi_offset 29, -96
.cfi_offset 30, -88
ldp x23, x24, [sp, 48]
adrp x0, .LC12
ldp x25, x26, [sp, 64]
add x0, x0, :lo12:.LC12
ldp x29, x30, [sp], 96
.cfi_restore 30
.cfi_restore 29
.cfi_restore 25
.cfi_restore 26
.cfi_restore 23
.cfi_restore 24
.cfi_def_cfa_offset 0
b puts
.cfi_endproc
.LFE26:
.size print_matrix, .-print_matrix
.section .rodata.str1.8
.align 3
.LC15:
.string "Mismatch at row %d, col %d (index %d): A=%f, B=%f, Diff=%f\n"
.text
.align 2
.p2align 5,,15
.global compare_matrices
.type compare_matrices, %function
compare_matrices:
.LFB27:
.cfi_startproc
cmp x0, 0
ccmp x1, 0, 4, ne
beq .L105
mul w2, w2, w3
cmp w2, 0
ble .L100
sxtw x2, w2
mov x5, 0
.p2align 5,,15
.L99:
ldr s31, [x0, x5, lsl 2]
ldr s1, [x1, x5, lsl 2]
fabd s2, s31, s1
fcmpe s2, s0
bgt .L101
add x5, x5, 1
cmp x2, x5
bne .L99
.L100:
mov w0, 1
ret
.p2align 2,,3
.L105:
mov w0, 0
ret
.p2align 2,,3
.L101:
sdiv w2, w5, w3
stp x29, x30, [sp, -16]!
.cfi_def_cfa_offset 16
.cfi_offset 29, -16
.cfi_offset 30, -8
adrp x0, stderr
mov x29, sp
fcvt d2, s2
fcvt d1, s1
fcvt d0, s31
msub w3, w2, w3, w5
mov w4, w5
ldr x0, [x0, #:lo12:stderr]
adrp x1, .LC15
add x1, x1, :lo12:.LC15
bl fprintf
mov w0, 0
ldp x29, x30, [sp], 16
.cfi_restore 30
.cfi_restore 29
.cfi_def_cfa_offset 0
ret
.cfi_endproc
.LFE27:
.size compare_matrices, .-compare_matrices
.section .rodata.str1.8
.align 3
.LC16:
.string "%s/head_%02d/q_matrix.txt"
.align 3
.LC17:
.string "%s/head_%02d/k_matrix.txt"
.align 3
.LC18:
.string "%s/head_%02d/v_matrix.txt"
.align 3
.LC19:
.string "%s/head_%02d/expected_context.txt"
.align 3
.LC20:
.string "Head %02d: Failed to transpose K"
.align 3
.LC21:
.string "Head %02d: Failed to allocate QK_T_scaled"
.align 3
.LC22:
.string "Head %02d: Failed to allocate Output"
.align 3
.LC23:
.string "Head %02d: >>> TEST FAILED <<< :(\n"
.align 3
.LC24:
.string "Head %02d: Calculated output does not match expected output within tolerance %f.\n"
.text
.align 2
.p2align 5,,15
.global run_attention_test_for_head
.type run_attention_test_for_head, %function
run_attention_test_for_head:
.LFB28:
.cfi_startproc
sub sp, sp, #2192
.cfi_def_cfa_offset 2192
mov w4, w1
mov x3, x0
adrp x2, .LC16
add x2, x2, :lo12:.LC16
stp x29, x30, [sp]
.cfi_offset 29, -2192
.cfi_offset 30, -2184
mov x29, sp
stp x19, x20, [sp, 16]
.cfi_offset 19, -2176
.cfi_offset 20, -2168
mov x19, x0
add x0, sp, 144
stp x21, x22, [sp, 32]
.cfi_offset 21, -2160
.cfi_offset 22, -2152
mov w21, w1
mov x1, 512
stp x25, x26, [sp, 64]
stp x27, x28, [sp, 80]
stp wzr, wzr, [sp, 116]
stp wzr, wzr, [sp, 124]
stp wzr, wzr, [sp, 132]
str wzr, [sp, 140]
.cfi_offset 25, -2128
.cfi_offset 26, -2120
.cfi_offset 27, -2112
.cfi_offset 28, -2104
bl snprintf
mov x3, x19
mov w4, w21
mov x1, 512
add x0, sp, 656
adrp x2, .LC17
add x2, x2, :lo12:.LC17
bl snprintf
mov x3, x19
mov w4, w21
mov x1, 512
add x0, sp, 1168
adrp x2, .LC18
add x2, x2, :lo12:.LC18
bl snprintf
mov x3, x19
mov w4, w21
mov x1, 512
add x0, sp, 1680
adrp x2, .LC19
add x2, x2, :lo12:.LC19
bl snprintf
add x2, sp, 120
add x1, sp, 116
add x0, sp, 144
bl load_matrix_data
add x2, sp, 120
add x1, sp, 128
mov x27, x0
add x0, sp, 656
bl load_matrix_data
mov x19, x0
add x2, sp, 124
add x1, sp, 132
str x0, [sp, 96]
add x0, sp, 1168
bl load_matrix_data
mov x25, x0
add x2, sp, 140
add x1, sp, 136
add x0, sp, 1680
bl load_matrix_data
ldp w1, w2, [sp, 116]
mov x20, x0
mov x0, x19
bl transpose
mov x19, x0
cbz x0, .L156
stp x23, x24, [sp, 48]
.cfi_offset 24, -2136
.cfi_offset 23, -2144
ldr w24, [sp, 116]
mul w13, w24, w24
str w13, [sp, 104]
ubfiz x0, x13, 2, 32
bl malloc
mov x26, x0
ldr w13, [sp, 104]
cbz x0, .L157
ldr w22, [sp, 124]
mul w0, w24, w22
stp w0, w13, [sp, 104]
sxtw x23, w0
sbfiz x0, x0, 2, 32
bl malloc
mov x28, x0
ldr w13, [sp, 108]
cbz x0, .L158
ldr w5, [sp, 120]
cmp w24, 0
ble .L114
sxtw x2, w24
cntb x0
whilelo p15.s, wzr, w5
ubfiz x14, x24, 2, 32
mov x12, x26
add x9, x19, x14
mov w11, 0
mul x2, x2, x0
mov w10, 0
asr x1, x14, 2
index z31.s, #0, w1
.p2align 5,,15
.L115:
add x1, x27, w11, sxtw 2
mov x7, x19
mov x8, x12
.L120:
str wzr, [x8]
cmp w5, 0
bgt .L119
add x7, x7, 4
add x8, x8, 4
cmp x7, x9
bne .L120
.p2align 5,,15
.L118:
add w10, w10, 1
add x12, x12, x14
add w11, w11, w5
cmp w24, w10
bne .L115
scvtf s0, w5
fcmp s0, #0.0
bpl .L140
str w13, [sp, 108]
bl sqrtf
fmov s30, 1.0e+0
ldr w13, [sp, 108]
fdiv s30, s30, s0
b .L124
.p2align 2,,3
.L159:
add x7, x7, 4
str s30, [x8], 4
cmp x7, x9
beq .L118
str wzr, [x8]
.L119:
movi v30.2s, #0
mov x4, x7
mov p7.b, p15.b
mov x0, 0
.p2align 5,,15
.L117:
ld1w z29.s, p7/z, [x1, x0, lsl 2]
ld1w z28.s, p7/z, [x4, z31.s, sxtw 2]
incw x0
fmul z29.s, p7/m, z29.s, z28.s
add x4, x4, x2
fadda s30, p7, s30, z29.s
whilelo p7.s, w0, w5
b.any .L117
b .L159
.L140:
fsqrt s0, s0
fmov s30, 1.0e+0
fdiv s30, s30, s0
.L124:
mov x0, 0
mov z30.s, s30
whilelo p7.s, wzr, w13
.p2align 5,,15
.L131:
ld1w z31.s, p7/z, [x26, x0, lsl 2]
fmul z31.s, p7/m, z31.s, z30.s
st1w z31.s, p7, [x26, x0, lsl 2]
incw x0
whilelo p7.s, w0, w13
b.any .L131
.L132:
mov w2, w24
mov w1, w24
mov x0, x26
bl softmax_naive
cmp w24, 0
ble .L129
cmp w22, 0
ble .L129
sxtw x2, w22
cntb x0
index z27.s, #0, w22
ubfiz x8, x22, 2, 32
mov x7, x28
whilelo p15.s, wzr, w24
ubfiz x10, x24, 2, 32
mov x1, x26
mul x2, x2, x0
mov w9, 0
.p2align 5,,15
.L130:
mov x5, 0
.p2align 5,,15
.L134:
movi v26.2s, #0
add x4, x25, x5
mov p7.b, p15.b
mov x0, 0
.p2align 5,,15
.L133:
ld1w z25.s, p7/z, [x1, x0, lsl 2]
ld1w z24.s, p7/z, [x4, z27.s, sxtw 2]
incw x0
fmul z25.s, p7/m, z25.s, z24.s
add x4, x4, x2
fadda s26, p7, s26, z25.s
whilelo p7.s, w0, w24
b.any .L133
str s26, [x7, x5]
add x5, x5, 4
cmp x8, x5
bne .L134
add w9, w9, 1
add x7, x7, x8
add x1, x1, x10
cmp w24, w9
bne .L130
.L129:
cbz x20, .L160
ldr w0, [sp, 104]
cmp w0, 0
ble .L139
mov w0, 46871
mov x3, 0
movk w0, 0x38d1, lsl 16
fmov s31, w0
.L138:
ldr s0, [x28, x3, lsl 2]
ldr s1, [x20, x3, lsl 2]
fabd s2, s0, s1
fcmpe s2, s31
bgt .L142
add x3, x3, 1
cmp x23, x3
bne .L138
.L139:
ldp x23, x24, [sp, 48]
.cfi_restore 24
.cfi_restore 23
mov w21, 1
.L112:
mov x0, x27
bl free
ldr x0, [sp, 96]
bl free
mov x0, x25
bl free
mov x0, x20
bl free
mov x0, x19
bl free
mov x0, x26
bl free
mov x0, x28
bl free
ldp x29, x30, [sp]
mov w0, w21
ldp x19, x20, [sp, 16]
ldp x21, x22, [sp, 32]
ldp x25, x26, [sp, 64]
ldp x27, x28, [sp, 80]
add sp, sp, 2192
.cfi_restore 27
.cfi_restore 28
.cfi_restore 25
.cfi_restore 26
.cfi_restore 21
.cfi_restore 22
.cfi_restore 19
.cfi_restore 20
.cfi_restore 29
.cfi_restore 30
.cfi_def_cfa_offset 0
ret
.L160:
.cfi_def_cfa_offset 2192
.cfi_offset 19, -2176
.cfi_offset 20, -2168
.cfi_offset 21, -2160
.cfi_offset 22, -2152
.cfi_offset 23, -2144
.cfi_offset 24, -2136
.cfi_offset 25, -2128
.cfi_offset 26, -2120
.cfi_offset 27, -2112
.cfi_offset 28, -2104
.cfi_offset 29, -2192
.cfi_offset 30, -2184
adrp x23, stderr
.L135:
mov w1, w21
adrp x0, .LC23
add x0, x0, :lo12:.LC23
bl printf
adrp x1, .LC25
mov w2, w21
ldr x0, [x23, #:lo12:stderr]
mov w21, 0
ldr d0, [x1, #:lo12:.LC25]
adrp x1, .LC24
add x1, x1, :lo12:.LC24
bl fprintf
ldp x23, x24, [sp, 48]
.cfi_remember_state
.cfi_restore 24
.cfi_restore 23
b .L112
.L142:
.cfi_restore_state
sdiv w2, w3, w22
adrp x23, stderr
mov w4, w3
fcvt d2, s2
fcvt d1, s1
fcvt d0, s0
ldr x0, [x23, #:lo12:stderr]
msub w3, w2, w22, w3
adrp x1, .LC15
add x1, x1, :lo12:.LC15
bl fprintf
b .L135
.L156:
.cfi_restore 23
.cfi_restore 24
adrp x0, .LC20
add x0, x0, :lo12:.LC20
bl perror
.L110:
mov w21, 0
mov x28, 0
mov x26, 0
b .L112
.L114:
.cfi_offset 23, -2144
.cfi_offset 24, -2136
scvtf s0, w5
fcmp s0, #0.0
bpl .L141
str w13, [sp, 108]
bl sqrtf
ldr w13, [sp, 108]
cbz w13, .L132
.L128:
fmov s30, 1.0e+0
fdiv s30, s30, s0
b .L124
.p2align 2,,3
.L141:
fsqrt s0, s0
cbz w13, .L132
b .L128
.L158:
adrp x0, .LC22
mov w21, 0
add x0, x0, :lo12:.LC22
bl perror
ldp x23, x24, [sp, 48]
.cfi_remember_state
.cfi_restore 24
.cfi_restore 23
b .L112
.L157:
.cfi_restore_state
adrp x0, .LC21
add x0, x0, :lo12:.LC21
bl perror
ldp x23, x24, [sp, 48]
.cfi_restore 24
.cfi_restore 23
b .L110
.cfi_endproc
.LFE28:
.size run_attention_test_for_head, .-run_attention_test_for_head
.section .rodata.str1.8
.align 3
.LC26:
.string "tests/workload_different_sizes/real_qkv_core_test_data_layer0_32"
.align 3
.LC27:
.string "Error: Number of heads must be positive.\n"
.align 3
.LC28:
.string "Usage: %s [base_directory_path num_heads]\n"
.align 3
.LC29:
.string "Example: %s real_qkv_core_test_data_layer0 12\n"
.align 3
.LC30:
.string "Starting Attention Core Test"
.align 3
.LC31:
.string "Base Directory: %s\n"
.align 3
.LC32:
.string "\n--- Test Summary ---"
.align 3
.LC33:
.string "------------------------"
.align 3
.LC34:
.string ">>> OVERALL TEST PASSED <<< :D"
.align 3
.LC35:
.string "One or more head tests FAILED."
.align 3
.LC36:
.string ">>> OVERALL TEST FAILED <<< D:"
.section .text.startup,"ax",@progbits
.align 2
.p2align 5,,15
.global main
.type main, %function
main:
.LFB29:
.cfi_startproc
stp x29, x30, [sp, -48]!
.cfi_def_cfa_offset 48
.cfi_offset 29, -48
.cfi_offset 30, -40
mov x29, sp
stp x19, x20, [sp, 16]
.cfi_offset 19, -32
.cfi_offset 20, -24
mov x19, x1
stp x21, x22, [sp, 32]
.cfi_offset 21, -16
.cfi_offset 22, -8
cmp w0, 3
beq .L171
cmp w0, 1
bne .L172
adrp x21, .LC26
add x21, x21, :lo12:.LC26
mov w22, 12
.L163:
mov w19, 0
mov w20, 1
adrp x0, .LC30
add x0, x0, :lo12:.LC30
bl puts
mov x1, x21
adrp x0, .LC31
add x0, x0, :lo12:.LC31
bl printf
.p2align 5,,15
.L166:
mov w1, w19
mov x0, x21
bl run_attention_test_for_head
tst w0, 255
ccmp w20, 0, 4, ne
add w19, w19, 1
cset w20, ne
cmp w22, w19
bne .L166
adrp x0, .LC32
add x0, x0, :lo12:.LC32
bl puts
cbz w20, .L167
adrp x19, .LC33
add x19, x19, :lo12:.LC33
mov x0, x19
bl puts
adrp x0, .LC34
add x0, x0, :lo12:.LC34
bl puts
mov x0, x19
bl puts
mov w0, 0
b .L161
.L167:
adrp x0, .LC35
adrp x19, .LC33
add x0, x0, :lo12:.LC35
add x19, x19, :lo12:.LC33
bl puts
mov x0, x19
bl puts
adrp x0, .LC36
add x0, x0, :lo12:.LC36
bl puts
mov x0, x19
bl puts
.L164:
mov w0, 1
.L161:
ldp x19, x20, [sp, 16]
ldp x21, x22, [sp, 32]
ldp x29, x30, [sp], 48
.cfi_remember_state
.cfi_restore 30
.cfi_restore 29
.cfi_restore 21
.cfi_restore 22
.cfi_restore 19
.cfi_restore 20
.cfi_def_cfa_offset 0
ret
.L172:
.cfi_restore_state
adrp x20, stderr
adrp x1, .LC28
ldr x2, [x19]
add x1, x1, :lo12:.LC28
ldr x0, [x20, #:lo12:stderr]
bl fprintf
ldr x2, [x19]
adrp x1, .LC29
ldr x0, [x20, #:lo12:stderr]
add x1, x1, :lo12:.LC29
bl fprintf
b .L164
.L171:
ldp x21, x0, [x1, 8]
mov w2, 10
mov x1, 0
bl strtol
mov w22, w0
cmp w0, 0
bgt .L163
adrp x1, stderr
adrp x0, .LC27
mov x2, 41
add x0, x0, :lo12:.LC27
ldr x3, [x1, #:lo12:stderr]
mov x1, 1
bl fwrite
b .L164
.cfi_endproc
.LFE29:
.size main, .-main
.section .rodata.cst8,"aM",@progbits,8
.align 3
.LC25:
.word -536870912
.word 1058682594
.ident "GCC: (GNU) 14.1.0"
.section .note.GNU-stack,"",@progbits
|
crpboy/NoAxiom-HAL
| 4,296
|
arch/src/rv64/trap.S
|
# TrapContext structure (offsets as used by the code below):
# 0 - user_reg (x0..x31)
# 256 - sstatus
# 264 - sepc
# 272 - kernel_sp
# 280 - kernel_ra
# 288 - kernel_reg (callee-saved s0..s11)
# 384 - kernel_fp
# 392 - kernel_tp
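# A rough Rust-side equivalent of this layout (a sketch; field names are
# assumed, only the offsets are taken from the code below):
#
#   #[repr(C)]
#   pub struct TrapContext {
#       pub user_reg: [usize; 32],   // 0..256: x0..x31
#       pub sstatus: usize,          // 256
#       pub sepc: usize,             // 264
#       pub kernel_sp: usize,        // 272
#       pub kernel_ra: usize,        // 280
#       pub kernel_reg: [usize; 12], // 288: callee-saved s0..s11
#       pub kernel_fp: usize,        // 384
#       pub kernel_tp: usize,        // 392
#   }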
.altmacro
.macro STORE_X n # store user regs
sd x\n, \n*8(sp)
.endm
.macro LOAD_X n # load user regs
ld x\n, \n*8(sp)
.endm
.macro STORE_S n, offset # store kernel callee-saved regs
sd s\n, \offset*8(a0)
.endm
.macro LOAD_S n, offset # load kernel callee-saved regs
ld s\n, \offset*8(sp)
.endm
.macro STORE_GENERAL_REG
sd ra, 1*8(sp)
.set n, 3
.rept 29
STORE_X %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
csrr t2, sscratch
sd t0, 32*8(sp)
sd t1, 33*8(sp)
.endm
.macro LOAD_GENERAL_REG
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld ra, 1*8(sp)
.set n, 3
.rept 29
LOAD_X %n
.set n, n+1
.endr
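# load user sp last: sp itself is the base register for every load above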
ld sp, 2*8(sp)
.endm
.macro KERNEL_SAVE_REG
addi sp, sp, -17*8
sd ra, 1*8(sp)
sd t0, 2*8(sp)
sd t1, 3*8(sp)
sd t2, 4*8(sp)
sd t3, 5*8(sp)
sd t4, 6*8(sp)
sd t5, 7*8(sp)
sd t6, 8*8(sp)
sd a0, 9*8(sp)
sd a1, 10*8(sp)
sd a2, 11*8(sp)
sd a3, 12*8(sp)
sd a4, 13*8(sp)
sd a5, 14*8(sp)
sd a6, 15*8(sp)
sd a7, 16*8(sp)
.endm
.macro KERNEL_LOAD_REG
ld ra, 1*8(sp)
ld t0, 2*8(sp)
ld t1, 3*8(sp)
ld t2, 4*8(sp)
ld t3, 5*8(sp)
ld t4, 6*8(sp)
ld t5, 7*8(sp)
ld t6, 8*8(sp)
ld a0, 9*8(sp)
ld a1, 10*8(sp)
ld a2, 11*8(sp)
ld a3, 12*8(sp)
ld a4, 13*8(sp)
ld a5, 14*8(sp)
ld a6, 15*8(sp)
ld a7, 16*8(sp)
addi sp, sp, 17*8
.endm
.section .text.trampoline
.globl __user_trapvec
.globl __user_trapret
.globl __kernel_trapvec
.globl __kernel_user_ptr_vec
.align 2
# user -> kernel
__user_trapvec:
csrrw sp, sscratch, sp # swap sp with sscratch: sscratch holds the TrapContext pointer
STORE_GENERAL_REG
sd t2, 2*8(sp) # user sp (read from sscratch in the macro) goes into the x2 slot
ld ra, 35*8(sp) # load kernel_ra
.set n, 0 # load callee-saved regs
.set offset, 36
.rept 12
LOAD_S %n offset
.set n, n+1
.set offset, offset+1
.endr
ld fp, 48*8(sp) # load kernel fp
ld tp, 49*8(sp)
ld sp, 34*8(sp)
ret # return to kernel ra
# kernel -> user
__user_trapret:
csrw sscratch, a0
sd sp, 34*8(a0) # save kernel callee-saved regs
sd ra, 35*8(a0)
.set n, 0
.set offset, 36
.rept 12
STORE_S %n offset
.set n, n+1
.set offset, offset+1
.endr
sd fp, 48*8(a0)
sd tp, 49*8(a0)
mv sp, a0
LOAD_GENERAL_REG
sret
# kernel -> kernel
# only need to save caller-saved regs
# note that we don't save sepc & stvec here
__kernel_trapvec:
KERNEL_SAVE_REG
call rv_kernel_trap_handler
KERNEL_LOAD_REG
sret
.globl __kernel_user_ptr_vec
.align 12
__kernel_user_ptr_vec:
csrr a0, sepc
addi a0, a0, 4
csrw sepc, a0
li a0, 1
csrr a1, scause
sret
.globl __try_read_user
.align 4
__try_read_user:
mv a1, a0
mv a0, zero
.option push
.option norvc
lb a1, 0(a1)
.option pop
ret
.globl __try_write_user
.align 4
__try_write_user:
mv a2, a0
mv a0, zero
.option push
.option norvc
lb a1, 0(a2)
sb a1, 0(a2)
.option pop
ret
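# The probes above run with stvec pointed at __kernel_user_ptr_vec (set up by
# the kernel, not shown here): on a fault it advances sepc by 4 and returns
# a0=1 with scause in a1, which is why `.option norvc` pins each probe to a
# single 4-byte instruction. A rough Rust-side wrapper (a sketch; names
# assumed):
#
#   extern "C" {
#       fn __try_read_user(addr: *const u8) -> usize; // 0 = ok, 1 = faulted
#   }
#   unsafe fn user_byte_readable(addr: *const u8) -> bool {
#       __try_read_user(addr) == 0
#   }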
.section .text.signal
.globl user_sigreturn
.align 12
user_sigreturn:
li a7, 139 # syscall SIGRETURN
ecall
|
crpboy/NoAxiom-HAL
| 6,868
|
arch/src/la64/trap.S
|
.equ CSR_PRMD, 0x1
.equ CSR_ESTAT, 0x5
.equ CSR_ERA, 0x6
.equ CSR_PGDL, 0x19
.equ CSR_PGD, 0x1b
.equ CSR_SAVE, 0x30
.equ CSR_SAVE1, 0x31
.altmacro
.macro STORE_X n # store user regs
st.d $r\n, $sp, \n*8
.endm
.macro LOAD_X n # load user regs
ld.d $r\n, $sp, \n*8
.endm
.macro STORE_S n, offset # store kernel callee-saved regs
st.d $s\n, $a0, \offset*8
.endm
.macro LOAD_S n, offset # load kernel callee-saved regs
ld.d $s\n, $sp, \offset*8
.endm
# user -> kernel
.section .text
.globl __user_trapvec
.align 12
__user_trapvec:
csrwr $sp, CSR_SAVE # take the kernel sp
# store user general regs
st.d $ra, $sp, 1*8 # x1 / ra (skip x0)
st.d $tp, $sp, 2*8 # x2 / tp
.set n, 4 # skip x3 / sp, start from x4; sp is saved separately below
.rept 28
STORE_X %n
.set n, n+1
.endr
csrrd $t0, CSR_PRMD # LA_PRMD <> sstatus
csrrd $t1, CSR_ERA # LA_ERA <> sepc
csrrd $t2, CSR_SAVE # LA_tmp_sp <> sscratch
st.d $t0, $sp, 32*8
st.d $t1, $sp, 33*8
st.d $t2, $sp, 3*8 # store sp (x3 in la64)
# load kernel regs
ld.d $ra, $sp, 35*8 # load kernel_ra
.set n, 0 # load callee-saved regs
.set offset, 36
.rept 10 # only save s0~s9
LOAD_S %n, %offset
.set n, n+1
.set offset, offset+1
.endr
ld.d $fp, $sp, 48*8 # load kernel fp
ld.d $tp, $sp, 49*8 # load kernel tp
ld.d $sp, $sp, 34*8 # load kernel sp
jirl $zero, $ra, 0 # return to kernel ra
# kernel -> user
.globl __user_trapret
.align 12
__user_trapret:
csrwr $a0, CSR_SAVE
csrrd $a0, CSR_SAVE # csrwr swapped $a0 with CSR_SAVE; read the trap-context pointer back
# store kernel regs
st.d $sp, $a0, 34*8 # save kernel callee-saved reg
st.d $ra, $a0, 35*8
.set n, 0
.set offset, 36
.rept 10
STORE_S %n, %offset
.set n, n+1
.set offset, offset+1
.endr
st.d $fp, $a0, 48*8
st.d $tp, $a0, 49*8
or $sp, $a0, $zero
# load user general regs
ld.d $t0, $sp, 32*8
ld.d $t1, $sp, 33*8
csrwr $t0, CSR_PRMD # PRMD
csrwr $t1, CSR_ERA # ERA
ld.d $ra, $sp, 1*8 # x1 / ra (skip x0)
ld.d $tp, $sp, 2*8 # x2 / tp
.set n, 4 # skip x3 / sp, start from x4
.rept 28
LOAD_X %n
.set n, n+1
.endr
ld.d $sp, $sp, 3*8 # load sp finally
ibar 0 # sync instruction cache
ertn # la: exception return <> rv: sret
# kernel -> kernel
.globl __kernel_trapvec
.align 12
__kernel_trapvec:
csrwr $sp, CSR_SAVE1 # save sp since we will align it
csrrd $sp, CSR_SAVE1
addi.d $sp, $sp, -64*8 # reserve enough space for the trap frame (currently uses 512 bytes)
srli.d $sp, $sp, 3 # align sp to avoid unaligned exception
slli.d $sp, $sp, 3
STORE_X %1
STORE_X %2
.set n, 4
.rept 28
STORE_X %n
.set n, n+1
.endr
csrrd $t0, CSR_PRMD
csrrd $t1, CSR_ERA
csrrd $t2, CSR_SAVE1 # SAVE1 still holds the original sp saved at entry
st.d $t0, $sp, 32*8
st.d $t1, $sp, 33*8
st.d $t2, $sp, 3*8 # store sp (x3 in la64)
move $a0, $sp # pass trap frame to handler
bl la_kernel_trap_handler # handle other traps
ld.d $t0, $sp, 32*8
ld.d $t1, $sp, 33*8
csrwr $t0, CSR_PRMD # PRMD
csrwr $t1, CSR_ERA # ERA
LOAD_X %1
LOAD_X %2
.set n, 4
.rept 28
LOAD_X %n
.set n, n+1
.endr
ld.d $sp, $sp, 3*8
ertn
.globl __kernel_user_ptr_vec
.align 12
__kernel_user_ptr_vec:
csrrd $a0, CSR_ERA
addi.d $a0, $a0, 4
csrwr $a0, CSR_ERA
li.d $a0, 1
csrrd $a1, CSR_ESTAT
ertn
.globl __try_read_user
.align 4
__try_read_user:
move $a1, $a0
move $a0, $zero
ld.b $a1, $a1, 0
jirl $zero, $ra, 0
.globl __try_write_user
.align 4
__try_write_user:
move $a2, $a0
move $a0, $zero
ld.b $a1, $a2, 0
st.b $a1, $a2, 0
jirl $zero, $ra, 0
# float registers store & load macros
FP_START = 0
FP_END = 32
.macro SAVE_FP n, m
fst.d $f\n, $a0, \m*8
.endm
.macro LOAD_FP n, m
fld.d $f\n, $a0, \m*8
.endm
.globl __save_freg
.globl __load_freg
# save float registers
.align 12
__save_freg:
.set n, 0
.set m, FP_START
.rept 32
SAVE_FP %n, %m # save freg
.set n, n+1
.set m, m+1
.endr
movfcsr2gr $t0, $fcsr0 # save FCSR
st.w $t0, $a0, FP_END*8
# save FCC: movcf2gr overwrites its whole destination register, so stage each
# flag in $t1 and OR it in; fcc7 ends up in bit 7, fcc0 in bit 0
movcf2gr $t0, $fcc7
slli.w $t0, $t0, 1
movcf2gr $t1, $fcc6
or $t0, $t0, $t1
slli.w $t0, $t0, 1
movcf2gr $t1, $fcc5
or $t0, $t0, $t1
slli.w $t0, $t0, 1
movcf2gr $t1, $fcc4
or $t0, $t0, $t1
slli.w $t0, $t0, 1
movcf2gr $t1, $fcc3
or $t0, $t0, $t1
slli.w $t0, $t0, 1
movcf2gr $t1, $fcc2
or $t0, $t0, $t1
slli.w $t0, $t0, 1
movcf2gr $t1, $fcc1
or $t0, $t0, $t1
slli.w $t0, $t0, 1
movcf2gr $t1, $fcc0
or $t0, $t0, $t1
st.b $t0, $a0, FP_END*8+4 # offset: 32 fregs + 4-byte fcsr
ret
# load float registers
.align 12
__load_freg:
.set n, 0
.set m, FP_START
.rept 32
LOAD_FP %n, %m
.set n, n+1
.set m, m+1
.endr
ld.w $t0, $a0, FP_END*8 # restore FCSR
movgr2fcsr $fcsr0, $t0
ld.b $t0, $a0, FP_END*8+4 # restore FCC
movgr2cf $fcc0, $t0
srli.w $t0, $t0, 1
movgr2cf $fcc1, $t0
srli.w $t0, $t0, 1
movgr2cf $fcc2, $t0
srli.w $t0, $t0, 1
movgr2cf $fcc3, $t0
srli.w $t0, $t0, 1
movgr2cf $fcc4, $t0
srli.w $t0, $t0, 1
movgr2cf $fcc5, $t0
srli.w $t0, $t0, 1
movgr2cf $fcc6, $t0
srli.w $t0, $t0, 1
movgr2cf $fcc7, $t0
ret
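# A rough Rust-side view of the buffer used by __save_freg/__load_freg
# (a sketch; field names are assumed, offsets follow from the code above):
#
#   #[repr(C)]
#   pub struct FloatContext {
#       pub f: [f64; 32], // 0..256: $f0..$f31
#       pub fcsr: u32,    // 256 (FP_END*8)
#       pub fcc: u8,      // 260: fcc7..fcc0 packed into bits 7..0
#   }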
.section .text.signal
.globl user_sigreturn
.align 12
user_sigreturn:
ori $a7, $zero, 139 # syscall SIGRETURN
syscall 0
|
crpboy/NoAxiom-HAL
| 4,334
|
arch/src/la64/tlb.S
|
# fixme: `addi.d $t0, $t0, -1` assumes the valid bit is the only flag set
# PGD: 0x1b CRMD:0x0 PWCL:0x1c TLBRBADV:0x89 TLBERA:0x8a TLBRSAVE:0x8b SAVE:0x30
# TLBREHi: 0x8e STLBPS: 0x1e MERRsave:0x95
# reference: https://gitlab.eduxiji.net/educg-group-22027-2376549/T202410699992496-1562/-/blob/NPUcore-FF/os/src/arch/la64/trap/mod.rs#L43
# notes: some constants were added to make the code more readable
.equ LA_CSR_CRMD, 0x00 # Current mode
.equ LA_CSR_TLBEHI, 0x11 # TLB entry high
.equ LA_CSR_TLBELO0, 0x12 # TLB entry low 0
.equ LA_CSR_TLBELO1, 0x13 # TLB entry low 1
.equ LA_CSR_PGDL, 0x19 # Page table base address when VA[47] = 0
.equ LA_CSR_PGDH, 0x1a # Page table base address when VA[47] = 1
.equ LA_CSR_PGD, 0x1b # Page table base
.equ LA_CSR_TLBRENTRY, 0x88 # TLB refill exception entry
.equ LA_CSR_TLBRBADV, 0x89 # TLB refill badvaddr
.equ LA_CSR_TLBRERA, 0x8a # TLB refill ERA
.equ LA_CSR_TLBRSAVE, 0x8b # KScratch for TLB refill exception
.equ LA_CSR_TLBRELO0, 0x8c # TLB refill entrylo0
.equ LA_CSR_TLBRELO1, 0x8d # TLB refill entrylo1
.equ LA_CSR_TLBREHI, 0x8e # TLB refill entryhi
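# Refill flow below: save $t0 to TLBRSAVE, walk the page table with
# lddir/ldpte (ldpte fills TLBRELO0/1), tlbfill the pair, then re-probe the
# entry via tlbsrch/tlbrd, restore $t0 and ertn. The 1: path fabricates an
# entry when the walk hits a missing level, apparently with high attribute
# bits set so the retried access traps as an ordinary page fault (an
# inference, not documented in this file).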
.globl __tlb_refill
.balign 4096
__tlb_refill:
csrwr $t0, LA_CSR_TLBRSAVE
csrrd $t0, LA_CSR_PGD
# lddir $t0, $t0, 3
# beqz $t0, 1f
lddir $t0, $t0, 2
beqz $t0, 1f
lddir $t0, $t0, 1
beqz $t0, 1f
ldpte $t0, 0
ldpte $t0, 1
csrrd $t0, LA_CSR_TLBRELO0
csrrd $t0, LA_CSR_TLBRELO1
csrrd $t0, LA_CSR_CRMD
2:
tlbfill
csrrd $t0, LA_CSR_TLBRBADV
srli.d $t0, $t0, 13
slli.d $t0, $t0, 13
csrwr $t0, LA_CSR_TLBEHI
tlbsrch
tlbrd
csrrd $t0, LA_CSR_TLBELO0
csrrd $t0, LA_CSR_TLBELO1
csrrd $t0, LA_CSR_TLBRSAVE
ertn
1:
csrrd $t0, LA_CSR_TLBREHI
ori $t0, $t0, 0xC
csrwr $t0, LA_CSR_TLBREHI
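# rotate bits 63:61 down to 2:0 so `ori` can set what become bits 62:61,
# then rotate back (avoids building the mask in another register)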
rotri.d $t0, $t0, 61
ori $t0, $t0, 3
rotri.d $t0, $t0, 3
csrwr $t0, LA_CSR_TLBRELO0
csrrd $t0, LA_CSR_TLBRELO0
csrwr $t0, LA_CSR_TLBRELO1
b 2b
# __tlb_refill:
# csrwr $t0, LA_CSR_TLBRSAVE # store temporary data
# # walk the page table
# csrrd $t0, LA_CSR_PGD
# lddir $t0, $t0, 3 # load: (39, 30]
# andi $t0, $t0, 1
# beqz $t0, 1f # valid check
# csrrd $t0, LA_CSR_PGD
# lddir $t0, $t0, 3 # reload: (39, 30]
# addi.d $t0, $t0, -1 # sub valid flag
# lddir $t0, $t0, 1 # load: (30, 21]
# andi $t0, $t0, 1
# beqz $t0, 1f # valid check
# csrrd $t0, LA_CSR_PGD # reload: (39, 30]
# lddir $t0, $t0, 3 # reload: (30, 21]
# addi.d $t0, $t0, -1
# lddir $t0, $t0, 1 # load: (21, 12]
# addi.d $t0, $t0, -1 # no need to check validity since we're in the lowest level
# ldpte $t0, 0 # even page pte, will update TLBRELO0
# ldpte $t0, 1 # odd page pte, will update TLBRELO1
# # csrrd $t0, LA_CSR_TLBRELO0
# # csrrd $t0, LA_CSR_TLBRELO1
# # csrrd $t0, 0x0 # fixme: are these instructions necessary? they just read the CSR value
# # complete the TLB refill and return
# 2:
# tlbfill
# csrrd $t0, LA_CSR_TLBRBADV
# srli.d $t0, $t0, 13
# slli.d $t0, $t0, 13
# csrwr $t0, LA_CSR_TLBEHI
# tlbsrch
# tlbrd
# csrrd $t0, LA_CSR_TLBELO0
# csrrd $t0, LA_CSR_TLBELO1
# csrwr $t0, LA_CSR_TLBRSAVE # restore temporary data, use csrwr to avoid data loss
# ertn
# # will trigger page fault
# 1:
# csrrd $t0, LA_CSR_TLBREHI
# ori $t0, $t0, 0xC
# csrwr $t0, LA_CSR_TLBREHI
# rotri.d $t0, $t0, 61
# ori $t0, $t0, 3 # fixme: is 3 correct? should be 7?
# rotri.d $t0, $t0, 3
# csrwr $t0, LA_CSR_TLBRELO0
# csrrd $t0, LA_CSR_TLBRELO0
# csrwr $t0, LA_CSR_TLBRELO1
# b 2b
|
cryptix-network/cryptix-miner-cpu
| 10,572
|
src/asm/keccakf1600_x86-64-win64.s
|
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.def __KeccakF1600; .scl 3; .type 32; .endef
.p2align 5
__KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.p2align 5
.Loop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.globl KeccakF1600
.def KeccakF1600; .scl 2; .type 32; .endef
.p2align 5
KeccakF1600:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_KeccakF1600:
movq %rcx,%rdi
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi
subq $200,%rsp
.LSEH_body_KeccakF1600:
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
leaq 248(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_KeccakF1600:
mov 8(%r11),%rdi
mov 16(%r11),%rsi
.byte 0xf3,0xc3
.LSEH_end_KeccakF1600:
.globl SHA3_absorb
.def SHA3_absorb; .scl 2; .type 32; .endef
.p2align 5
SHA3_absorb:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_absorb:
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
leaq 100(%rdi),%rdi
subq $232,%rsp
.LSEH_body_SHA3_absorb:
movq %rsi,%r9
leaq 100(%rsp),%rsi
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
.Loop_absorb:
cmpq %rcx,%rdx
jc .Ldone_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
.Lblock_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.p2align 5
.Ldone_absorb:
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.LSEH_epilogue_SHA3_absorb:
mov 8(%r11),%rdi
mov 16(%r11),%rsi
.byte 0xf3,0xc3
.LSEH_end_SHA3_absorb:
.globl SHA3_squeeze
.def SHA3_squeeze; .scl 2; .type 32; .endef
.p2align 5
SHA3_squeeze:
.byte 0xf3,0x0f,0x1e,0xfa
movq %rdi,8(%rsp)
movq %rsi,16(%rsp)
movq %rsp,%r11
.LSEH_begin_SHA3_squeeze:
movq %rcx,%rdi
movq %rdx,%rsi
movq %r8,%rdx
movq %r9,%rcx
pushq %r12
pushq %r13
pushq %r14
subq $32,%rsp
.LSEH_body_SHA3_squeeze:
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp .Loop_squeeze
.p2align 5
.Loop_squeeze:
cmpq $8,%r13
jb .Ltail_squeeze
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
subq $1,%rcx
jnz .Loop_squeeze
movq %rdi,%rcx
call KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
.Ldone_squeeze:
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.LSEH_epilogue_SHA3_squeeze:
mov 8(%rsp),%rdi
mov 16(%rsp),%rsi
.byte 0xf3,0xc3
.LSEH_end_SHA3_squeeze:
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.section .pdata
.p2align 2
.rva .LSEH_begin_KeccakF1600
.rva .LSEH_body_KeccakF1600
.rva .LSEH_info_KeccakF1600_prologue
.rva .LSEH_body_KeccakF1600
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_info_KeccakF1600_body
.rva .LSEH_epilogue_KeccakF1600
.rva .LSEH_end_KeccakF1600
.rva .LSEH_info_KeccakF1600_epilogue
.rva .LSEH_begin_SHA3_absorb
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_prologue
.rva .LSEH_body_SHA3_absorb
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_body
.rva .LSEH_epilogue_SHA3_absorb
.rva .LSEH_end_SHA3_absorb
.rva .LSEH_info_SHA3_absorb_epilogue
.rva .LSEH_begin_SHA3_squeeze
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_prologue
.rva .LSEH_body_SHA3_squeeze
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_body
.rva .LSEH_epilogue_SHA3_squeeze
.rva .LSEH_end_SHA3_squeeze
.rva .LSEH_info_SHA3_squeeze_epilogue
.section .xdata
.p2align 3
.LSEH_info_KeccakF1600_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_KeccakF1600_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x19,0x00
.byte 0x00,0xe4,0x1a,0x00
.byte 0x00,0xd4,0x1b,0x00
.byte 0x00,0xc4,0x1c,0x00
.byte 0x00,0x54,0x1d,0x00
.byte 0x00,0x34,0x1e,0x00
.byte 0x00,0x74,0x20,0x00
.byte 0x00,0x64,0x21,0x00
.byte 0x00,0x01,0x1f,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_KeccakF1600_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_absorb_body:
.byte 1,0,18,0
.byte 0x00,0xf4,0x1d,0x00
.byte 0x00,0xe4,0x1e,0x00
.byte 0x00,0xd4,0x1f,0x00
.byte 0x00,0xc4,0x20,0x00
.byte 0x00,0x54,0x21,0x00
.byte 0x00,0x34,0x22,0x00
.byte 0x00,0x74,0x24,0x00
.byte 0x00,0x64,0x25,0x00
.byte 0x00,0x01,0x23,0x00
.byte 0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_absorb_epilogue:
.byte 1,0,5,11
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0xb3
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_prologue:
.byte 1,0,5,0x0b
.byte 0,0x74,1,0
.byte 0,0x64,2,0
.byte 0,0xb3
.byte 0,0
.long 0,0
.LSEH_info_SHA3_squeeze_body:
.byte 1,0,11,0
.byte 0x00,0xe4,0x04,0x00
.byte 0x00,0xd4,0x05,0x00
.byte 0x00,0xc4,0x06,0x00
.byte 0x00,0x74,0x08,0x00
.byte 0x00,0x64,0x09,0x00
.byte 0x00,0x62
.byte 0x00,0x00,0x00,0x00,0x00,0x00
.LSEH_info_SHA3_squeeze_epilogue:
.byte 1,0,4,0
.byte 0x00,0x74,0x01,0x00
.byte 0x00,0x64,0x02,0x00
.byte 0x00,0x00,0x00,0x00
|
cryptix-network/cryptix-miner-cpu
| 8,238
|
src/asm/keccakf1600_x86-64-osx.s
|
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.p2align 5
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp L$oop
.p2align 5
L$oop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz L$oop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.cfi_endproc
.globl _KeccakF1600
.p2align 5
_KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
leaq 248(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.globl _SHA3_absorb
.p2align 5
_SHA3_absorb:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $232,%rsp
.cfi_adjust_cfa_offset 232
movq %rsi,%r9
leaq 100(%rsp),%rsi
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
L$oop_absorb:
cmpq %rcx,%rdx
jc L$done_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
L$block_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz L$block_absorb
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp L$oop_absorb
.p2align 5
L$done_absorb:
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.globl _SHA3_squeeze
.p2align 5
_SHA3_squeeze:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-32
subq $32,%rsp
.cfi_adjust_cfa_offset 32
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp L$oop_squeeze
.p2align 5
L$oop_squeeze:
cmpq $8,%r13
jb L$tail_squeeze
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz L$done_squeeze
subq $1,%rcx
jnz L$oop_squeeze
movq %rdi,%rcx
call _KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp L$oop_squeeze
L$tail_squeeze:
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4
L$done_squeeze:
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.cfi_adjust_cfa_offset -56
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.byte 0xf3,0xc3
.cfi_endproc
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
cryptix-network/cryptix-miner-cpu
| 8,619
|
src/asm/keccakf1600_x86-64-elf.s
|
# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.type __KeccakF1600,@function
.align 32
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.align 32
.Loop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15 # iotas ends on a 256-byte boundary, so the low byte is zero only after the 24th constant
jnz .Loop
leaq -192(%r15),%r15 # rewind %r15 to the first round constant
.byte 0xf3,0xc3 # repz ret
.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
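#
# KeccakF1600 is the C-callable wrapper; its assumed prototype, per the
# OpenSSL/CRYPTOGAMS convention, is `void KeccakF1600(uint64_t A[5][5])`.
# It saves the callee-saved registers, complements lanes 1, 2, 8, 12, 17
# and 20 of the 25-lane state on entry and again on exit, and runs the core
# against a 200-byte stack scratch area.
#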
.globl KeccakF1600
.type KeccakF1600,@function
.align 32
KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa # endbr64
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi # bias the state pointer so all 25 lanes sit at 8-bit displacements
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi) # enter the complemented-lanes representation (this and the next five lanes are stored inverted)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi) # leave the complemented-lanes representation
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
leaq 248(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3 # repz ret
.cfi_endproc
.size KeccakF1600,.-KeccakF1600
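#
# SHA3_absorb XORs whole rate-sized blocks of input into the state and runs
# the permutation after each one; the count of unabsorbed tail bytes is
# returned in %rax. Assumed prototype (OpenSSL/CRYPTOGAMS convention):
#   size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp,
#                      size_t len, size_t r);  # r = rate in bytes
# Like KeccakF1600 it enters and leaves the complemented-lanes
# representation around its main loop.
#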
.globl SHA3_absorb
.type SHA3_absorb,@function
.align 32
SHA3_absorb:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa # endbr64
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi # bias the state pointer so all 25 lanes sit at 8-bit displacements
subq $232,%rsp
.cfi_adjust_cfa_offset 232
movq %rsi,%r9
leaq 100(%rsp),%rsi
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
movq %rcx,216-100(%rsi)
.Loop_absorb:
cmpq %rcx,%rdx
jc .Ldone_absorb
shrq $3,%rcx
leaq -100(%rdi),%r8
.Lblock_absorb:
movq (%r9),%rax
leaq 8(%r9),%r9
xorq (%r8),%rax
leaq 8(%r8),%r8
subq $8,%rdx
movq %rax,-8(%r8)
subq $1,%rcx
jnz .Lblock_absorb
movq %r9,200-100(%rsi)
movq %rdx,208-100(%rsi)
call __KeccakF1600
movq 200-100(%rsi),%r9
movq 208-100(%rsi),%rdx
movq 216-100(%rsi),%rcx
jmp .Loop_absorb
.align 32
.Ldone_absorb:
movq %rdx,%rax
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq 280(%rsp),%r11
.cfi_def_cfa %r11,8
movq -48(%r11),%r15
movq -40(%r11),%r14
movq -32(%r11),%r13
movq -24(%r11),%r12
movq -16(%r11),%rbp
movq -8(%r11),%rbx
leaq (%r11),%rsp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.cfi_restore %rbx
.byte 0xf3,0xc3 # repz ret
.cfi_endproc
.size SHA3_absorb,.-SHA3_absorb
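#
# SHA3_squeeze copies the state to the output eight bytes at a time,
# re-running the permutation whenever a full rate-sized block has been
# emitted and more output is needed, and handles any sub-8-byte tail with
# `rep movsb`. Assumed prototype (OpenSSL/CRYPTOGAMS convention):
#   void SHA3_squeeze(uint64_t A[5][5], unsigned char *out,
#                     size_t len, size_t r);
#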
.globl SHA3_squeeze
.type SHA3_squeeze,@function
.align 32
SHA3_squeeze:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa # endbr64
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-32
subq $32,%rsp
.cfi_adjust_cfa_offset 32
shrq $3,%rcx
movq %rdi,%r8
movq %rsi,%r12
movq %rdx,%r13
movq %rcx,%r14
jmp .Loop_squeeze
.align 32
.Loop_squeeze:
cmpq $8,%r13
jb .Ltail_squeeze
movq (%r8),%rax
leaq 8(%r8),%r8
movq %rax,(%r12)
leaq 8(%r12),%r12
subq $8,%r13
jz .Ldone_squeeze
subq $1,%rcx
jnz .Loop_squeeze
movq %rdi,%rcx
call KeccakF1600
movq %rdi,%r8
movq %r14,%rcx
jmp .Loop_squeeze
.Ltail_squeeze:
movq %r8,%rsi
movq %r12,%rdi
movq %r13,%rcx
.byte 0xf3,0xa4 # rep movsb: copy the remaining %rcx tail bytes from (%rsi) to (%rdi)
.Ldone_squeeze:
movq 32(%rsp),%r14
movq 40(%rsp),%r13
movq 48(%rsp),%r12
addq $56,%rsp
.cfi_adjust_cfa_offset -56
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.byte 0xf3,0xc3 # repz ret
.cfi_endproc
.size SHA3_squeeze,.-SHA3_squeeze
.align 256
.quad 0,0,0,0,0,0,0,0
.type iotas,@object
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.size iotas,.-iotas
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 # "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.section .note.gnu.property,"a",@note # GNU property note: x86 feature bits IBT|SHSTK, matching the endbr64 pads above
.long 4,2f-1f,5
.byte 0x47,0x4E,0x55,0
1: .long 0xc0000002,4,3
.align 8
2:
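# For readers wiring these entry points into C, here is a minimal one-shot
# SHA3-256 sketch. The prototypes, the 136-byte rate, and the 0x06/0x80
# padding bytes are assumptions based on the OpenSSL/CRYPTOGAMS convention
# and FIPS 202, not on anything stated in this file:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Assumed prototypes for the assembly routines above (OpenSSL convention). */
size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp,
                   size_t len, size_t r);
void SHA3_squeeze(uint64_t A[5][5], unsigned char *out,
                  size_t len, size_t r);

void sha3_256(unsigned char out[32], const unsigned char *msg, size_t len)
{
    uint64_t A[5][5];          /* 1600-bit Keccak state                   */
    unsigned char block[136];  /* SHA3-256 rate: (1600 - 2*256)/8 bytes   */
    size_t rem;

    memset(A, 0, sizeof(A));

    /* Absorb all whole 136-byte blocks; rem < 136 bytes are left over. */
    rem = SHA3_absorb(A, msg, len, sizeof(block));

    /* pad10*1 with the SHA-3 domain separator: 0x06 ... 0x80 (FIPS 202). */
    memset(block, 0, sizeof(block));
    memcpy(block, msg + (len - rem), rem);
    block[rem] = 0x06;
    block[sizeof(block) - 1] |= 0x80;
    SHA3_absorb(A, block, sizeof(block), sizeof(block)); /* final block */

    SHA3_squeeze(A, out, 32, sizeof(block));             /* 256-bit digest */
}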