repo_id: mktmansour/MKT-KSA-Geolocation-Security
size: 7,463
file_path: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/ml-kem-768/aarch64/__asm_poly.S
content:
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
.align 2
.global PQCLEAN_MLKEM768_AARCH64__asm_add_reduce
.global _PQCLEAN_MLKEM768_AARCH64__asm_add_reduce
PQCLEAN_MLKEM768_AARCH64__asm_add_reduce:
_PQCLEAN_MLKEM768_AARCH64__asm_add_reduce:
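    /* AAPCS64 arguments (assumed from the PQClean prototype and the
       register use below): x0 = des (updated in place), x1 = src; both
       point to 256 int16 coefficients. x2 is set below as a second read
       cursor into des so loads and the loop's stores can advance
       independently. w4 (q = 3329) and w5 (reduction constant) are
       broadcast into v0/v1 for the oo_barrett macro from macros.inc. */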
mov w4, #3329
mov w5, #25519
add x2, x0, #0
dup v0.8H, w4
dup v1.8H, w5
ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64
ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64
add v4.8H, v16.8H, v24.8H
add v5.8H, v17.8H, v25.8H
add v6.8H, v18.8H, v26.8H
add v7.8H, v19.8H, v27.8H
add v16.8H, v20.8H, v28.8H
add v17.8H, v21.8H, v29.8H
add v18.8H, v22.8H, v30.8H
add v19.8H, v23.8H, v31.8H
oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0
mov x15, #3
_add_reduce_loop:
st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64
ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64
ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64
add v4.8H, v16.8H, v24.8H
add v5.8H, v17.8H, v25.8H
add v6.8H, v18.8H, v26.8H
add v7.8H, v19.8H, v27.8H
add v16.8H, v20.8H, v28.8H
add v17.8H, v21.8H, v29.8H
add v18.8H, v22.8H, v30.8H
add v19.8H, v23.8H, v31.8H
oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0
sub x15, x15, #1
cbnz x15, _add_reduce_loop
st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64
ret
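Each pass above handles 64 coefficients (eight 8-halfword vectors); one peeled pass plus three loop trips (x15 = 3) cover all 256. Below is a C-level sketch of what the routine computes, assuming the PQClean prototype void __asm_add_reduce(int16_t *des, const int16_t *src) (consistent with the x0/x1 usage above) and modelling the reduction with the Kyber reference barrett_reduce; the vector constant in w5 and the #11 shift are consumed inside oo_barrett and need not match the scalar constant used here:

#include <stdint.h>

#define Q 3329

/* Reference-style Barrett reduction: returns a value congruent to
   a mod Q in a small range around zero; v = 20159 = round(2^26 / Q). */
static int16_t barrett_reduce(int16_t a) {
    const int16_t v = ((1 << 26) + Q / 2) / Q;
    int16_t t = (int16_t)(((int32_t)v * a + (1 << 25)) >> 26);
    return (int16_t)(a - t * Q);
}

/* Scalar model of _asm_add_reduce: des[i] = reduce(des[i] + src[i]). */
void add_reduce_model(int16_t des[256], const int16_t src[256]) {
    for (int i = 0; i < 256; i++)
        des[i] = barrett_reduce((int16_t)(des[i] + src[i]));
}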
.align 2
.global PQCLEAN_MLKEM768_AARCH64__asm_sub_reduce
.global _PQCLEAN_MLKEM768_AARCH64__asm_sub_reduce
PQCLEAN_MLKEM768_AARCH64__asm_sub_reduce:
_PQCLEAN_MLKEM768_AARCH64__asm_sub_reduce:
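    /* Identical structure to _asm_add_reduce with sub in place of add:
       des[i] = reduce(des[i] - src[i]). */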
mov w4, #3329
mov w5, #25519
add x2, x0, #0
dup v0.8H, w4
dup v1.8H, w5
ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64
ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64
sub v4.8H, v16.8H, v24.8H
sub v5.8H, v17.8H, v25.8H
sub v6.8H, v18.8H, v26.8H
sub v7.8H, v19.8H, v27.8H
sub v16.8H, v20.8H, v28.8H
sub v17.8H, v21.8H, v29.8H
sub v18.8H, v22.8H, v30.8H
sub v19.8H, v23.8H, v31.8H
oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0
mov x15, #3
_sub_reduce_loop:
st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64
ld1 {v28.8H, v29.8H, v30.8H, v31.8H}, [x1], #64
ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x2], #64
ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x2], #64
sub v4.8H, v16.8H, v24.8H
sub v5.8H, v17.8H, v25.8H
sub v6.8H, v18.8H, v26.8H
sub v7.8H, v19.8H, v27.8H
sub v16.8H, v20.8H, v28.8H
sub v17.8H, v21.8H, v29.8H
sub v18.8H, v22.8H, v30.8H
sub v19.8H, v23.8H, v31.8H
oo_barrett v4, v5, v6, v7, v20, v21, v22, v23, v16, v17, v18, v19, v24, v25, v26, v27, v1, #11, v0
sub x15, x15, #1
cbnz x15, _sub_reduce_loop
st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
st1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #64
ret
.align 2
.global PQCLEAN_MLKEM768_AARCH64__asm_add_add_reduce
.global _PQCLEAN_MLKEM768_AARCH64__asm_add_add_reduce
PQCLEAN_MLKEM768_AARCH64__asm_add_add_reduce:
_PQCLEAN_MLKEM768_AARCH64__asm_add_add_reduce:
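    /* Three-operand variant: x0 = des (read back through the x3 copy,
       then overwritten), x1 and x2 = two further source polynomials;
       computes des[i] = reduce(des[i] + src1[i] + src2[i]). */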
mov w4, #3329
mov w5, #25519
add x3, x0, #0
dup v0.8H, w4
dup v1.8H, w5
ld1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x3], #64
ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x3], #64
ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x1], #64
ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
add v4.8H, v4.8H, v16.8H
add v5.8H, v5.8H, v17.8H
ld1 {v16.8H, v17.8H}, [x2], #32
add v6.8H, v6.8H, v18.8H
add v7.8H, v7.8H, v19.8H
ld1 {v18.8H, v19.8H}, [x2], #32
add v20.8H, v20.8H, v24.8H
add v21.8H, v21.8H, v25.8H
ld1 {v24.8H, v25.8H}, [x2], #32
add v22.8H, v22.8H, v26.8H
add v23.8H, v23.8H, v27.8H
ld1 {v26.8H, v27.8H}, [x2], #32
add v4.8H, v4.8H, v16.8H
add v5.8H, v5.8H, v17.8H
add v6.8H, v6.8H, v18.8H
add v7.8H, v7.8H, v19.8H
add v20.8H, v20.8H, v24.8H
add v21.8H, v21.8H, v25.8H
add v22.8H, v22.8H, v26.8H
add v23.8H, v23.8H, v27.8H
oo_barrett v4, v5, v6, v7, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v1, #11, v0
mov x15, #3
_add_add_reduce_loop:
st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
ld1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x3], #64
st1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x0], #64
ld1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x3], #64
ld1 {v16.8H, v17.8H, v18.8H, v19.8H}, [x1], #64
ld1 {v24.8H, v25.8H, v26.8H, v27.8H}, [x1], #64
add v4.8H, v4.8H, v16.8H
add v5.8H, v5.8H, v17.8H
ld1 {v16.8H, v17.8H}, [x2], #32
add v6.8H, v6.8H, v18.8H
add v7.8H, v7.8H, v19.8H
ld1 {v18.8H, v19.8H}, [x2], #32
add v20.8H, v20.8H, v24.8H
add v21.8H, v21.8H, v25.8H
ld1 {v24.8H, v25.8H}, [x2], #32
add v22.8H, v22.8H, v26.8H
add v23.8H, v23.8H, v27.8H
ld1 {v26.8H, v27.8H}, [x2], #32
add v4.8H, v4.8H, v16.8H
add v5.8H, v5.8H, v17.8H
add v6.8H, v6.8H, v18.8H
add v7.8H, v7.8H, v19.8H
add v20.8H, v20.8H, v24.8H
add v21.8H, v21.8H, v25.8H
add v22.8H, v22.8H, v26.8H
add v23.8H, v23.8H, v27.8H
oo_barrett v4, v5, v6, v7, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v1, #11, v0
sub x15, x15, #1
cbnz x15, _add_add_reduce_loop
st1 { v4.8H, v5.8H, v6.8H, v7.8H}, [x0], #64
st1 {v20.8H, v21.8H, v22.8H, v23.8H}, [x0], #64
ret
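The three-operand routine follows the same scalar model; a sketch reusing barrett_reduce from above (the int16_t cast wraps exactly like the 16-bit vector adds):

/* Scalar model of _asm_add_add_reduce. */
void add_add_reduce_model(int16_t des[256], const int16_t src1[256],
                          const int16_t src2[256]) {
    for (int i = 0; i < 256; i++)
        des[i] = barrett_reduce((int16_t)(des[i] + src1[i] + src2[i]));
}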
repo_id: mktmansour/MKT-KSA-Geolocation-Security
size: 76,935
file_path: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/vec256_ama_asm.S
content:
#include "namespace.h"
#define vec256_ama_asm CRYPTO_NAMESPACE(vec256_ama_asm)
#define _vec256_ama_asm _CRYPTO_NAMESPACE(vec256_ama_asm)
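# CRYPTO_NAMESPACE (from namespace.h) prefixes the symbol with a
# parameter-set-specific namespace so that several Classic McEliece
# parameter sets can be linked into one binary; _CRYPTO_NAMESPACE
# emits the same name with a leading underscore for platforms that
# decorate C symbols.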
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg256 a0
# qhasm: reg256 a1
# qhasm: reg256 a2
# qhasm: reg256 a3
# qhasm: reg256 a4
# qhasm: reg256 a5
# qhasm: reg256 a6
# qhasm: reg256 a7
# qhasm: reg256 a8
# qhasm: reg256 a9
# qhasm: reg256 a10
# qhasm: reg256 a11
# qhasm: reg256 a12
# qhasm: reg256 b0
# qhasm: reg256 b1
# qhasm: reg256 r0
# qhasm: reg256 r1
# qhasm: reg256 r2
# qhasm: reg256 r3
# qhasm: reg256 r4
# qhasm: reg256 r5
# qhasm: reg256 r6
# qhasm: reg256 r7
# qhasm: reg256 r8
# qhasm: reg256 r9
# qhasm: reg256 r10
# qhasm: reg256 r11
# qhasm: reg256 r12
# qhasm: reg256 r13
# qhasm: reg256 r14
# qhasm: reg256 r15
# qhasm: reg256 r16
# qhasm: reg256 r17
# qhasm: reg256 r18
# qhasm: reg256 r19
# qhasm: reg256 r20
# qhasm: reg256 r21
# qhasm: reg256 r22
# qhasm: reg256 r23
# qhasm: reg256 r24
# qhasm: reg256 r
# qhasm: enter vec256_ama_asm
.p2align 5
.global _vec256_ama_asm
.global vec256_ama_asm
_vec256_ama_asm:
vec256_ama_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
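# Note: the four instructions above are qhasm's prologue; they round
# rsp down to the next 32-byte boundary (reserving 0 extra bytes) so
# that any aligned ymm spills would be safe. The visible body spills
# nothing to the stack and uses only r11 as scratch.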
# qhasm: b0 = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1
# asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0
vmovupd 0(%rdx),%ymm0
# qhasm: a12 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>a12=reg256#2
# asm 2: vmovupd 384(<input_0=%rdi),>a12=%ymm1
vmovupd 384(%rdi),%ymm1
# qhasm: a12 = a12 ^ mem256[ input_1 + 384 ]
# asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2
# asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1
vpxor 384(%rsi),%ymm1,%ymm1
# qhasm: mem256[ input_0 + 384 ] = a12
# asm 1: vmovupd <a12=reg256#2,384(<input_0=int64#1)
# asm 2: vmovupd <a12=%ymm1,384(<input_0=%rdi)
vmovupd %ymm1,384(%rdi)
# qhasm: r12 = a12 & b0
# asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3
# asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2
vpand %ymm1,%ymm0,%ymm2
# qhasm: r13 = a12 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4
# asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3
vpand 32(%rdx),%ymm1,%ymm3
# qhasm: r14 = a12 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5
# asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4
vpand 64(%rdx),%ymm1,%ymm4
# qhasm: r15 = a12 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6
# asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5
vpand 96(%rdx),%ymm1,%ymm5
# qhasm: r16 = a12 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7
# asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6
vpand 128(%rdx),%ymm1,%ymm6
# qhasm: r17 = a12 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8
# asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7
vpand 160(%rdx),%ymm1,%ymm7
# qhasm: r18 = a12 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9
# asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8
vpand 192(%rdx),%ymm1,%ymm8
# qhasm: r19 = a12 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10
# asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9
vpand 224(%rdx),%ymm1,%ymm9
# qhasm: r20 = a12 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11
# asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10
vpand 256(%rdx),%ymm1,%ymm10
# qhasm: r21 = a12 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12
# asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11
vpand 288(%rdx),%ymm1,%ymm11
# qhasm: r22 = a12 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13
# asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12
vpand 320(%rdx),%ymm1,%ymm12
# qhasm: r23 = a12 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13
vpand 352(%rdx),%ymm1,%ymm13
# qhasm: r24 = a12 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2
# asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1
vpand 384(%rdx),%ymm1,%ymm1
# qhasm: r15 ^= r24
# asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5
vpxor %ymm1,%ymm5,%ymm5
# qhasm: r14 ^= r24
# asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4
vpxor %ymm1,%ymm4,%ymm4
# qhasm: r12 ^= r24
# asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2
vpxor %ymm1,%ymm2,%ymm2
# qhasm: r11 = r24
# asm 1: vmovapd <r24=reg256#2,>r11=reg256#2
# asm 2: vmovapd <r24=%ymm1,>r11=%ymm1
vmovapd %ymm1,%ymm1
# qhasm: a11 = mem256[ input_0 + 352 ]
# asm 1: vmovupd 352(<input_0=int64#1),>a11=reg256#15
# asm 2: vmovupd 352(<input_0=%rdi),>a11=%ymm14
vmovupd 352(%rdi),%ymm14
# qhasm: a11 = a11 ^ mem256[ input_1 + 352 ]
# asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15
# asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14
vpxor 352(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 352 ] = a11
# asm 1: vmovupd <a11=reg256#15,352(<input_0=int64#1)
# asm 2: vmovupd <a11=%ymm14,352(<input_0=%rdi)
vmovupd %ymm14,352(%rdi)
# qhasm: r = a11 & b0
# asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a11 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a11 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a11 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a11 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a11 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6
# qhasm: r = a11 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7
# qhasm: r = a11 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8
# qhasm: r = a11 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9
# qhasm: r = a11 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15,%ymm10,%ymm10
# qhasm: r = a11 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor %ymm15,%ymm11,%ymm11
# qhasm: r = a11 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12
vpxor %ymm15,%ymm12,%ymm12
# qhasm: r = a11 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r23 ^= r
# asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14
# asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13
vpxor %ymm14,%ymm13,%ymm13
# qhasm: r14 ^= r23
# asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4
vpxor %ymm13,%ymm4,%ymm4
# qhasm: r13 ^= r23
# asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3
vpxor %ymm13,%ymm3,%ymm3
# qhasm: r11 ^= r23
# asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1
vpxor %ymm13,%ymm1,%ymm1
# qhasm: r10 = r23
# asm 1: vmovapd <r23=reg256#14,>r10=reg256#14
# asm 2: vmovapd <r23=%ymm13,>r10=%ymm13
vmovapd %ymm13,%ymm13
# qhasm: a10 = mem256[ input_0 + 320 ]
# asm 1: vmovupd 320(<input_0=int64#1),>a10=reg256#15
# asm 2: vmovupd 320(<input_0=%rdi),>a10=%ymm14
vmovupd 320(%rdi),%ymm14
# qhasm: a10 = a10 ^ mem256[ input_1 + 320 ]
# asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15
# asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14
vpxor 320(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 320 ] = a10
# asm 1: vmovupd <a10=reg256#15,320(<input_0=int64#1)
# asm 2: vmovupd <a10=%ymm14,320(<input_0=%rdi)
vmovupd %ymm14,320(%rdi)
# qhasm: r = a10 & b0
# asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13
# qhasm: r = a10 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a10 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a10 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a10 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a10 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a10 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6
# qhasm: r = a10 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7
# qhasm: r = a10 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8
# qhasm: r = a10 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9
# qhasm: r = a10 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15,%ymm10,%ymm10
# qhasm: r = a10 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor %ymm15,%ymm11,%ymm11
# qhasm: r = a10 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12
vpxor %ymm14,%ymm12,%ymm12
# qhasm: r13 ^= r22
# asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3
vpxor %ymm12,%ymm3,%ymm3
# qhasm: r12 ^= r22
# asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2
vpxor %ymm12,%ymm2,%ymm2
# qhasm: r10 ^= r22
# asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13
vpxor %ymm12,%ymm13,%ymm13
# qhasm: r9 = r22
# asm 1: vmovapd <r22=reg256#13,>r9=reg256#13
# asm 2: vmovapd <r22=%ymm12,>r9=%ymm12
vmovapd %ymm12,%ymm12
# qhasm: a9 = mem256[ input_0 + 288 ]
# asm 1: vmovupd 288(<input_0=int64#1),>a9=reg256#15
# asm 2: vmovupd 288(<input_0=%rdi),>a9=%ymm14
vmovupd 288(%rdi),%ymm14
# qhasm: a9 = a9 ^ mem256[ input_1 + 288 ]
# asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15
# asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14
vpxor 288(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 288 ] = a9
# asm 1: vmovupd <a9=reg256#15,288(<input_0=int64#1)
# asm 2: vmovupd <a9=%ymm14,288(<input_0=%rdi)
vmovupd %ymm14,288(%rdi)
# qhasm: r = a9 & b0
# asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12
# qhasm: r = a9 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13
# qhasm: r = a9 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a9 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a9 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a9 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a9 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a9 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6
# qhasm: r = a9 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7
# qhasm: r = a9 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8
# qhasm: r = a9 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9
# qhasm: r = a9 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15,%ymm10,%ymm10
# qhasm: r = a9 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11
vpxor %ymm14,%ymm11,%ymm11
# qhasm: r12 ^= r21
# asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2
vpxor %ymm11,%ymm2,%ymm2
# qhasm: r11 ^= r21
# asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1
vpxor %ymm11,%ymm1,%ymm1
# qhasm: r9 ^= r21
# asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12
vpxor %ymm11,%ymm12,%ymm12
# qhasm: r8 = r21
# asm 1: vmovapd <r21=reg256#12,>r8=reg256#12
# asm 2: vmovapd <r21=%ymm11,>r8=%ymm11
vmovapd %ymm11,%ymm11
# qhasm: a8 = mem256[ input_0 + 256 ]
# asm 1: vmovupd 256(<input_0=int64#1),>a8=reg256#15
# asm 2: vmovupd 256(<input_0=%rdi),>a8=%ymm14
vmovupd 256(%rdi),%ymm14
# qhasm: a8 = a8 ^ mem256[ input_1 + 256 ]
# asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15
# asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14
vpxor 256(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 256 ] = a8
# asm 1: vmovupd <a8=reg256#15,256(<input_0=int64#1)
# asm 2: vmovupd <a8=%ymm14,256(<input_0=%rdi)
vmovupd %ymm14,256(%rdi)
# qhasm: r = a8 & b0
# asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11
# qhasm: r = a8 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12
# qhasm: r = a8 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13
# qhasm: r = a8 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a8 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a8 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a8 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a8 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a8 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6
# qhasm: r = a8 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7
# qhasm: r = a8 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8
# qhasm: r = a8 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9
# qhasm: r = a8 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10
vpxor %ymm14,%ymm10,%ymm10
# qhasm: r11 ^= r20
# asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1
vpxor %ymm10,%ymm1,%ymm1
# qhasm: r10 ^= r20
# asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13
vpxor %ymm10,%ymm13,%ymm13
# qhasm: r8 ^= r20
# asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11
vpxor %ymm10,%ymm11,%ymm11
# qhasm: r7 = r20
# asm 1: vmovapd <r20=reg256#11,>r7=reg256#11
# asm 2: vmovapd <r20=%ymm10,>r7=%ymm10
vmovapd %ymm10,%ymm10
# qhasm: a7 = mem256[ input_0 + 224 ]
# asm 1: vmovupd 224(<input_0=int64#1),>a7=reg256#15
# asm 2: vmovupd 224(<input_0=%rdi),>a7=%ymm14
vmovupd 224(%rdi),%ymm14
# qhasm: a7 = a7 ^ mem256[ input_1 + 224 ]
# asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15
# asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14
vpxor 224(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 224 ] = a7
# asm 1: vmovupd <a7=reg256#15,224(<input_0=int64#1)
# asm 2: vmovupd <a7=%ymm14,224(<input_0=%rdi)
vmovupd %ymm14,224(%rdi)
# qhasm: r = a7 & b0
# asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10
# qhasm: r = a7 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11
# qhasm: r = a7 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12
# qhasm: r = a7 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13
# qhasm: r = a7 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a7 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a7 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a7 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a7 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a7 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6
# qhasm: r = a7 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7
# qhasm: r = a7 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8
# qhasm: r = a7 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9
vpxor %ymm14,%ymm9,%ymm9
# qhasm: r10 ^= r19
# asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13
vpxor %ymm9,%ymm13,%ymm13
# qhasm: r9 ^= r19
# asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12
vpxor %ymm9,%ymm12,%ymm12
# qhasm: r7 ^= r19
# asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10
vpxor %ymm9,%ymm10,%ymm10
# qhasm: r6 = r19
# asm 1: vmovapd <r19=reg256#10,>r6=reg256#10
# asm 2: vmovapd <r19=%ymm9,>r6=%ymm9
vmovapd %ymm9,%ymm9
# qhasm: a6 = mem256[ input_0 + 192 ]
# asm 1: vmovupd 192(<input_0=int64#1),>a6=reg256#15
# asm 2: vmovupd 192(<input_0=%rdi),>a6=%ymm14
vmovupd 192(%rdi),%ymm14
# qhasm: a6 = a6 ^ mem256[ input_1 + 192 ]
# asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15
# asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14
vpxor 192(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 192 ] = a6
# asm 1: vmovupd <a6=reg256#15,192(<input_0=int64#1)
# asm 2: vmovupd <a6=%ymm14,192(<input_0=%rdi)
vmovupd %ymm14,192(%rdi)
# qhasm: r = a6 & b0
# asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9
# qhasm: r = a6 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10
# qhasm: r = a6 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11
# qhasm: r = a6 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12
# qhasm: r = a6 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13
# qhasm: r = a6 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a6 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a6 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a6 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a6 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a6 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6
# qhasm: r = a6 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7
# qhasm: r = a6 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8
vpxor %ymm14,%ymm8,%ymm8
# qhasm: r9 ^= r18
# asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12
vpxor %ymm8,%ymm12,%ymm12
# qhasm: r8 ^= r18
# asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11
vpxor %ymm8,%ymm11,%ymm11
# qhasm: r6 ^= r18
# asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9
vpxor %ymm8,%ymm9,%ymm9
# qhasm: r5 = r18
# asm 1: vmovapd <r18=reg256#9,>r5=reg256#9
# asm 2: vmovapd <r18=%ymm8,>r5=%ymm8
vmovapd %ymm8,%ymm8
# qhasm: a5 = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>a5=reg256#15
# asm 2: vmovupd 160(<input_0=%rdi),>a5=%ymm14
vmovupd 160(%rdi),%ymm14
# qhasm: a5 = a5 ^ mem256[ input_1 + 160 ]
# asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15
# asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14
vpxor 160(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 160 ] = a5
# asm 1: vmovupd <a5=reg256#15,160(<input_0=int64#1)
# asm 2: vmovupd <a5=%ymm14,160(<input_0=%rdi)
vmovupd %ymm14,160(%rdi)
# qhasm: r = a5 & b0
# asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8
# qhasm: r = a5 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9
# qhasm: r = a5 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10
# qhasm: r = a5 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11
# qhasm: r = a5 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12
# qhasm: r = a5 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13
# qhasm: r = a5 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a5 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a5 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a5 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a5 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a5 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6
# qhasm: r = a5 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7
vpxor %ymm14,%ymm7,%ymm7
# qhasm: r8 ^= r17
# asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11
vpxor %ymm7,%ymm11,%ymm11
# qhasm: r7 ^= r17
# asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10
vpxor %ymm7,%ymm10,%ymm10
# qhasm: r5 ^= r17
# asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8
vpxor %ymm7,%ymm8,%ymm8
# qhasm: r4 = r17
# asm 1: vmovapd <r17=reg256#8,>r4=reg256#8
# asm 2: vmovapd <r17=%ymm7,>r4=%ymm7
vmovapd %ymm7,%ymm7
# qhasm: a4 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>a4=reg256#15
# asm 2: vmovupd 128(<input_0=%rdi),>a4=%ymm14
vmovupd 128(%rdi),%ymm14
# qhasm: a4 = a4 ^ mem256[ input_1 + 128 ]
# asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15
# asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14
vpxor 128(%rsi),%ymm14,%ymm14
# qhasm: mem256[ input_0 + 128 ] = a4
# asm 1: vmovupd <a4=reg256#15,128(<input_0=int64#1)
# asm 2: vmovupd <a4=%ymm14,128(<input_0=%rdi)
vmovupd %ymm14,128(%rdi)
# qhasm: r = a4 & b0
# asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7
# qhasm: r = a4 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8
# qhasm: r = a4 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9
# qhasm: r = a4 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10
# qhasm: r = a4 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11
# qhasm: r = a4 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12
# qhasm: r = a4 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13
# qhasm: r = a4 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1
# qhasm: r = a4 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2
# qhasm: r = a4 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3
# qhasm: r = a4 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4
# qhasm: r = a4 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5
# qhasm: r = a4 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6
vpxor %ymm14,%ymm6,%ymm6
# qhasm: r7 ^= r16
# asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10
vpxor %ymm6,%ymm10,%ymm10
# qhasm: r6 ^= r16
# asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9
vpxor % ymm6, % ymm9, % ymm9
# qhasm: r4 ^= r16
# asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7
vpxor % ymm6, % ymm7, % ymm7
# qhasm: r3 = r16
# asm 1: vmovapd <r16=reg256#7,>r3=reg256#7
# asm 2: vmovapd <r16=%ymm6,>r3=%ymm6
vmovapd % ymm6, % ymm6
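# editorial note: the three vpxor steps and the vmovapd above fold the
# overflow limb r16 back into the low limbs. Assuming the Classic McEliece
# field polynomial f(x) = x^13 + x^4 + x^3 + x + 1, we have
#   x^16 = x^3 * x^13 == x^7 + x^6 + x^4 + x^3   (mod f),
# which is exactly r7 ^= r16, r6 ^= r16, r4 ^= r16, r3 = r16. The vmovapd
# of %ymm6 onto itself is a qhasm register-rename no-op, not a real move.
# The same fold repeats below for r15, r14 and r13, with every target
# index lowered by one each time.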
# qhasm: a3 = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>a3=reg256#15
# asm 2: vmovupd 96(<input_0=%rdi),>a3=%ymm14
vmovupd 96( % rdi), % ymm14
# qhasm: a3 = a3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15
# asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14
vpxor 96( % rsi), % ymm14, % ymm14
# qhasm: mem256[ input_0 + 96 ] = a3
# asm 1: vmovupd <a3=reg256#15,96(<input_0=int64#1)
# asm 2: vmovupd <a3=%ymm14,96(<input_0=%rdi)
vmovupd % ymm14, 96( % rdi)
# qhasm: r = a3 & b0
# asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a3 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a3 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a3 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a3 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a3 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a3 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a3 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a3 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a3 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a3 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a3 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a3 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5
vpxor % ymm14, % ymm5, % ymm5
# qhasm: r6 ^= r15
# asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9
vpxor % ymm5, % ymm9, % ymm9
# qhasm: r5 ^= r15
# asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8
vpxor % ymm5, % ymm8, % ymm8
# qhasm: r3 ^= r15
# asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6
vpxor % ymm5, % ymm6, % ymm6
# qhasm: r2 = r15
# asm 1: vmovapd <r15=reg256#6,>r2=reg256#6
# asm 2: vmovapd <r15=%ymm5,>r2=%ymm5
vmovapd % ymm5, % ymm5
# qhasm: a2 = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>a2=reg256#15
# asm 2: vmovupd 64(<input_0=%rdi),>a2=%ymm14
vmovupd 64( % rdi), % ymm14
# qhasm: a2 = a2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15
# asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14
vpxor 64( % rsi), % ymm14, % ymm14
# qhasm: mem256[ input_0 + 64 ] = a2
# asm 1: vmovupd <a2=reg256#15,64(<input_0=int64#1)
# asm 2: vmovupd <a2=%ymm14,64(<input_0=%rdi)
vmovupd % ymm14, 64( % rdi)
# qhasm: r = a2 & b0
# asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a2 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a2 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a2 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a2 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a2 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a2 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a2 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a2 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a2 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a2 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a2 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a2 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4
vpxor % ymm14, % ymm4, % ymm4
# qhasm: r5 ^= r14
# asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8
vpxor % ymm4, % ymm8, % ymm8
# qhasm: r4 ^= r14
# asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7
vpxor % ymm4, % ymm7, % ymm7
# qhasm: r2 ^= r14
# asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5
vpxor % ymm4, % ymm5, % ymm5
# qhasm: r1 = r14
# asm 1: vmovapd <r14=reg256#5,>r1=reg256#5
# asm 2: vmovapd <r14=%ymm4,>r1=%ymm4
vmovapd % ymm4, % ymm4
# qhasm: a1 = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>a1=reg256#15
# asm 2: vmovupd 32(<input_0=%rdi),>a1=%ymm14
vmovupd 32( % rdi), % ymm14
# qhasm: a1 = a1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15
# asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14
vpxor 32( % rsi), % ymm14, % ymm14
# qhasm: mem256[ input_0 + 32 ] = a1
# asm 1: vmovupd <a1=reg256#15,32(<input_0=int64#1)
# asm 2: vmovupd <a1=%ymm14,32(<input_0=%rdi)
vmovupd % ymm14, 32( % rdi)
# qhasm: r = a1 & b0
# asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a1 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a1 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a1 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a1 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a1 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a1 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a1 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a1 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a1 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a1 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a1 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a1 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3
vpxor % ymm14, % ymm3, % ymm3
# qhasm: r4 ^= r13
# asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7
vpxor % ymm3, % ymm7, % ymm7
# qhasm: r3 ^= r13
# asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6
vpxor % ymm3, % ymm6, % ymm6
# qhasm: r1 ^= r13
# asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4
vpxor % ymm3, % ymm4, % ymm4
# qhasm: r0 = r13
# asm 1: vmovapd <r13=reg256#4,>r0=reg256#4
# asm 2: vmovapd <r13=%ymm3,>r0=%ymm3
vmovapd % ymm3, % ymm3
# qhasm: a0 = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>a0=reg256#15
# asm 2: vmovupd 0(<input_0=%rdi),>a0=%ymm14
vmovupd 0( % rdi), % ymm14
# qhasm: a0 = a0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15
# asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14
vpxor 0( % rsi), % ymm14, % ymm14
# qhasm: mem256[ input_0 + 0 ] = a0
# asm 1: vmovupd <a0=reg256#15,0(<input_0=int64#1)
# asm 2: vmovupd <a0=%ymm14,0(<input_0=%rdi)
vmovupd % ymm14, 0( % rdi)
# qhasm: r = a0 & b0
# asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1
# asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0
vpand % ymm14, % ymm0, % ymm0
# qhasm: r0 ^= r
# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor % ymm0, % ymm3, % ymm3
# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 32( % rdx), % ymm14, % ymm0
# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor % ymm0, % ymm4, % ymm4
# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 64( % rdx), % ymm14, % ymm0
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor % ymm0, % ymm5, % ymm5
# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 96( % rdx), % ymm14, % ymm0
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor % ymm0, % ymm6, % ymm6
# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 128( % rdx), % ymm14, % ymm0
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor % ymm0, % ymm7, % ymm7
# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 160( % rdx), % ymm14, % ymm0
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor % ymm0, % ymm8, % ymm8
# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 192( % rdx), % ymm14, % ymm0
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor % ymm0, % ymm9, % ymm9
# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 224( % rdx), % ymm14, % ymm0
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor % ymm0, % ymm10, % ymm10
# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 256( % rdx), % ymm14, % ymm0
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor % ymm0, % ymm11, % ymm11
# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 288( % rdx), % ymm14, % ymm0
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor % ymm0, % ymm12, % ymm12
# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 320( % rdx), % ymm14, % ymm0
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
vpxor % ymm0, % ymm13, % ymm13
# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352( % rdx), % ymm14, % ymm0
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor % ymm0, % ymm1, % ymm1
# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384( % rdx), % ymm14, % ymm0
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor % ymm0, % ymm2, % ymm2
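# editorial note: at this point r0..r12 hold the fully reduced product; the
# sequence below XORs each limb into mem256[ input_1 + 32*i ] and stores it
# back, completing the final accumulation of this routine.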
# qhasm: r12 = r12 ^ mem256[ input_1 + 384 ]
# asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1
# asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0
vpxor 384( % rsi), % ymm2, % ymm0
# qhasm: mem256[ input_1 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2)
# asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi)
vmovupd % ymm0, 384( % rsi)
# qhasm: r11 = r11 ^ mem256[ input_1 + 352 ]
# asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1
# asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0
vpxor 352( % rsi), % ymm1, % ymm0
# qhasm: mem256[ input_1 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2)
# asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi)
vmovupd % ymm0, 352( % rsi)
# qhasm: r10 = r10 ^ mem256[ input_1 + 320 ]
# asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1
# asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0
vpxor 320( % rsi), % ymm13, % ymm0
# qhasm: mem256[ input_1 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2)
# asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi)
vmovupd % ymm0, 320( % rsi)
# qhasm: r9 = r9 ^ mem256[ input_1 + 288 ]
# asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1
# asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0
vpxor 288( % rsi), % ymm12, % ymm0
# qhasm: mem256[ input_1 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2)
# asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi)
vmovupd % ymm0, 288( % rsi)
# qhasm: r8 = r8 ^ mem256[ input_1 + 256 ]
# asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1
# asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0
vpxor 256( % rsi), % ymm11, % ymm0
# qhasm: mem256[ input_1 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2)
# asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi)
vmovupd % ymm0, 256( % rsi)
# qhasm: r7 = r7 ^ mem256[ input_1 + 224 ]
# asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1
# asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0
vpxor 224( % rsi), % ymm10, % ymm0
# qhasm: mem256[ input_1 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2)
# asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi)
vmovupd % ymm0, 224( % rsi)
# qhasm: r6 = r6 ^ mem256[ input_1 + 192 ]
# asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1
# asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0
vpxor 192( % rsi), % ymm9, % ymm0
# qhasm: mem256[ input_1 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2)
# asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi)
vmovupd % ymm0, 192( % rsi)
# qhasm: r5 = r5 ^ mem256[ input_1 + 160 ]
# asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1
# asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0
vpxor 160( % rsi), % ymm8, % ymm0
# qhasm: mem256[ input_1 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2)
# asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi)
vmovupd % ymm0, 160( % rsi)
# qhasm: r4 = r4 ^ mem256[ input_1 + 128 ]
# asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1
# asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0
vpxor 128( % rsi), % ymm7, % ymm0
# qhasm: mem256[ input_1 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2)
# asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi)
vmovupd % ymm0, 128( % rsi)
# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1
# asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0
vpxor 96( % rsi), % ymm6, % ymm0
# qhasm: mem256[ input_1 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2)
# asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi)
vmovupd % ymm0, 96( % rsi)
# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1
# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0
vpxor 64( % rsi), % ymm5, % ymm0
# qhasm: mem256[ input_1 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2)
# asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi)
vmovupd % ymm0, 64( % rsi)
# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1
# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0
vpxor 32( % rsi), % ymm4, % ymm0
# qhasm: mem256[ input_1 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2)
# asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi)
vmovupd % ymm0, 32( % rsi)
# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1
# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0
vpxor 0( % rsi), % ymm3, % ymm0
# qhasm: mem256[ input_1 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2)
# asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi)
vmovupd % ymm0, 0( % rsi)
# qhasm: return
add % r11, % rsp
ret
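# ---------------------------------------------------------------------------
# editorial note (not qhasm output): the routine ending at this ret appears
# to implement a bitsliced "add-multiply-add" over GF(2^13): it first
# replaces a with a ^ b in place, accumulates the bit-sliced carry-less
# product of the 13 limbs via AND/XOR, folds the high limbs back modulo the
# degree-13 field polynomial, and finally XORs the 13 result limbs into the
# second operand. A minimal scalar C sketch of the same pattern follows;
# the function name and the scalar `limb` type are illustrative stand-ins
# (each limb models one 256-bit vector lane), not the library's API:
#
#   typedef unsigned long long limb;   /* stands in for one 256-bit lane */
#
#   void gf13_ama_sketch(limb a[13], limb b[13], const limb c[13]) {
#       limb r[25] = {0};
#       int i, j, k;
#       for (i = 0; i < 13; i++) a[i] ^= b[i];     /* first "add", in place */
#       for (i = 0; i < 13; i++)                   /* bitsliced carry-less mul */
#           for (j = 0; j < 13; j++)
#               r[i + j] ^= a[i] & c[j];
#       for (k = 24; k >= 13; k--) {               /* fold mod x^13+x^4+x^3+x+1 */
#           r[k - 13 + 4] ^= r[k];
#           r[k - 13 + 3] ^= r[k];
#           r[k - 13 + 1] ^= r[k];
#           r[k - 13]     ^= r[k];
#       }
#       for (i = 0; i < 13; i++) b[i] ^= r[i];     /* second "add", in place */
#   }
# ---------------------------------------------------------------------------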
mktmansour/MKT-KSA-Geolocation-Security | 262,634 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/transpose_64x64_asm.S
#include "namespace.h"
#define MASK0_0 CRYPTO_NAMESPACE(MASK0_0)
#define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0)
#define MASK0_1 CRYPTO_NAMESPACE(MASK0_1)
#define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1)
#define MASK1_0 CRYPTO_NAMESPACE(MASK1_0)
#define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0)
#define MASK1_1 CRYPTO_NAMESPACE(MASK1_1)
#define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1)
#define MASK2_0 CRYPTO_NAMESPACE(MASK2_0)
#define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0)
#define MASK2_1 CRYPTO_NAMESPACE(MASK2_1)
#define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1)
#define MASK3_0 CRYPTO_NAMESPACE(MASK3_0)
#define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0)
#define MASK3_1 CRYPTO_NAMESPACE(MASK3_1)
#define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1)
#define MASK4_0 CRYPTO_NAMESPACE(MASK4_0)
#define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0)
#define MASK4_1 CRYPTO_NAMESPACE(MASK4_1)
#define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1)
#define MASK5_0 CRYPTO_NAMESPACE(MASK5_0)
#define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0)
#define MASK5_1 CRYPTO_NAMESPACE(MASK5_1)
#define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1)
#define transpose_64x64_asm CRYPTO_NAMESPACE(transpose_64x64_asm)
#define _transpose_64x64_asm _CRYPTO_NAMESPACE(transpose_64x64_asm)
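# editorial note: the CRYPTO_NAMESPACE wrappers above prefix every exported
# symbol with the PQClean parameter-set namespace, so that several Classic
# McEliece variants can be linked into one binary without symbol clashes.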
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 r5
# qhasm: reg128 r6
# qhasm: reg128 r7
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 v00
# qhasm: reg128 v01
# qhasm: reg128 v10
# qhasm: reg128 v11
# qhasm: int64 buf
# qhasm: reg128 mask0
# qhasm: reg128 mask1
# qhasm: reg128 mask2
# qhasm: reg128 mask3
# qhasm: reg128 mask4
# qhasm: reg128 mask5
# qhasm: enter transpose_64x64_asm
.p2align 5
.global _transpose_64x64_asm
.global transpose_64x64_asm
_transpose_64x64_asm:
transpose_64x64_asm:
mov % rsp, % r11
and $31, % r11
add $0, % r11
sub % r11, % rsp
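# editorial note: the four instructions above round %rsp down to a 32-byte
# boundary: %r11 receives rsp mod 32 (plus zero bytes of spill space, hence
# the add $0) and is subtracted here; a matching "add %r11, %rsp" before
# ret undoes the adjustment. This is the stack-alignment prologue that
# qhasm emits for these generated routines.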
# qhasm: mask0 aligned= mem128[ MASK5_0 ]
# asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0
movdqa MASK5_0( % rip), % xmm0
# qhasm: mask1 aligned= mem128[ MASK5_1 ]
# asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1
movdqa MASK5_1( % rip), % xmm1
# qhasm: mask2 aligned= mem128[ MASK4_0 ]
# asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2
movdqa MASK4_0( % rip), % xmm2
# qhasm: mask3 aligned= mem128[ MASK4_1 ]
# asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3
movdqa MASK4_1( % rip), % xmm3
# qhasm: mask4 aligned= mem128[ MASK3_0 ]
# asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4
movdqa MASK3_0( % rip), % xmm4
# qhasm: mask5 aligned= mem128[ MASK3_1 ]
# asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5
movdqa MASK3_1( % rip), % xmm5
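# editorial note: the six masks loaded above drive the three butterfly
# stages below; presumably MASK5_0/MASK5_1 select the complementary 32-bit
# halves of each 64-bit word, MASK4_* the 16-bit halves, and MASK3_* the
# 8-bit halves (the actual constants live in a companion data file).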
# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0( % rdi), % xmm6
# qhasm: r1 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7
movddup 64( % rdi), % xmm7
# qhasm: r2 = mem64[ input_0 + 128 ] x2
# asm 1: movddup 128(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8
movddup 128( % rdi), % xmm8
# qhasm: r3 = mem64[ input_0 + 192 ] x2
# asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9
movddup 192( % rdi), % xmm9
# qhasm: r4 = mem64[ input_0 + 256 ] x2
# asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10
movddup 256( % rdi), % xmm10
# qhasm: r5 = mem64[ input_0 + 320 ] x2
# asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11
movddup 320( % rdi), % xmm11
# qhasm: r6 = mem64[ input_0 + 384 ] x2
# asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12
movddup 384( % rdi), % xmm12
# qhasm: r7 = mem64[ input_0 + 448 ] x2
# asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13
movddup 448( % rdi), % xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9
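# editorial note: the block above is the 32-bit butterfly stage of the
# transpose, applied to the pairs (r0,r4), (r1,r5), (r2,r6), (r3,r7). A
# scalar sketch of one exchange, with d = 32 and illustrative mask names
# (assuming m0 selects the low half of each 64-bit word and m1 the high):
#
#   new_lo = (lo & m0) | (hi << d);
#   new_hi = (lo >> d) | (hi & m1);
#
# The two stages below repeat this with d = 16 (MASK4_*) and d = 8
# (MASK3_*); the remaining d = 4, 2, 1 stages of the full 64x64 bit
# transpose are presumably handled by a later pass using MASK2_*, MASK1_*
# and MASK0_*.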
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, % xmm7, % xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, % xmm6, % xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor % xmm15, % xmm12, % xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor % xmm7, % xmm6, % xmm6
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, % xmm9, % rsi
# qhasm: mem64[ input_0 + 0 ] = buf
# asm 1: movq <buf=int64#2,0(<input_0=int64#1)
# asm 2: movq <buf=%rsi,0(<input_0=%rdi)
movq % rsi, 0( % rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, % xmm13, % rsi
# qhasm: mem64[ input_0 + 64 ] = buf
# asm 1: movq <buf=int64#2,64(<input_0=int64#1)
# asm 2: movq <buf=%rsi,64(<input_0=%rdi)
movq % rsi, 64( % rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, % xmm14, % rsi
# qhasm: mem64[ input_0 + 128 ] = buf
# asm 1: movq <buf=int64#2,128(<input_0=int64#1)
# asm 2: movq <buf=%rsi,128(<input_0=%rdi)
movq % rsi, 128( % rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, % xmm10, % rsi
# qhasm: mem64[ input_0 + 192 ] = buf
# asm 1: movq <buf=int64#2,192(<input_0=int64#1)
# asm 2: movq <buf=%rsi,192(<input_0=%rdi)
movq % rsi, 192( % rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, % xmm11, % rsi
# qhasm: mem64[ input_0 + 256 ] = buf
# asm 1: movq <buf=int64#2,256(<input_0=int64#1)
# asm 2: movq <buf=%rsi,256(<input_0=%rdi)
movq % rsi, 256( % rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, % xmm8, % rsi
# qhasm: mem64[ input_0 + 320 ] = buf
# asm 1: movq <buf=int64#2,320(<input_0=int64#1)
# asm 2: movq <buf=%rsi,320(<input_0=%rdi)
movq % rsi, 320( % rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, % xmm12, % rsi
# qhasm: mem64[ input_0 + 384 ] = buf
# asm 1: movq <buf=int64#2,384(<input_0=int64#1)
# asm 2: movq <buf=%rsi,384(<input_0=%rdi)
movq % rsi, 384( % rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, % xmm6, % rsi
# qhasm: mem64[ input_0 + 448 ] = buf
# asm 1: movq <buf=int64#2,448(<input_0=int64#1)
# asm 2: movq <buf=%rsi,448(<input_0=%rdi)
movq % rsi, 448( % rdi)
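# editorial note: one group of eight 64-bit words (byte offsets 0, 64, ...,
# 448) has now been transposed in place. The same load / three-stage
# butterfly / store pattern repeats below for the next group at byte
# offsets 8, 72, ..., 456, and so on; only the memory offsets change.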
# qhasm: r0 = mem64[ input_0 + 8 ] x2
# asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6
movddup 8( % rdi), % xmm6
# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72( % rdi), % xmm7
# qhasm: r2 = mem64[ input_0 + 136 ] x2
# asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8
movddup 136( % rdi), % xmm8
# qhasm: r3 = mem64[ input_0 + 200 ] x2
# asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9
movddup 200( % rdi), % xmm9
# qhasm: r4 = mem64[ input_0 + 264 ] x2
# asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10
movddup 264( % rdi), % xmm10
# qhasm: r5 = mem64[ input_0 + 328 ] x2
# asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11
movddup 328( % rdi), % xmm11
# qhasm: r6 = mem64[ input_0 + 392 ] x2
# asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12
movddup 392( % rdi), % xmm12
# qhasm: r7 = mem64[ input_0 + 456 ] x2
# asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13
movddup 456( % rdi), % xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, % xmm10, % xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, % xmm6, % xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor % xmm15, % xmm14, % xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor % xmm10, % xmm6, % xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, % xmm11, % xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, % xmm7, % xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor % xmm15, % xmm10, % xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor % xmm11, % xmm7, % xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, % xmm12, % xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, % xmm8, % xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor % xmm15, % xmm11, % xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor % xmm12, % xmm8, % xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, % xmm13, % xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, % xmm9, % xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand % xmm1, % xmm13, % xmm13
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor % xmm15, % xmm12, % xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor % xmm13, % xmm9, % xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand % xmm2, % xmm14, % xmm13
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, % xmm11, % xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, % xmm14, % xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor % xmm15, % xmm13, % xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor % xmm11, % xmm14, % xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand % xmm2, % xmm10, % xmm14
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, % xmm12, % xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, % xmm10, % xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand % xmm3, % xmm12, % xmm12
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor % xmm15, % xmm14, % xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor % xmm12, % xmm10, % xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand % xmm2, % xmm6, % xmm12
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, % xmm8, % xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, % xmm6, % xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor % xmm15, % xmm12, % xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor % xmm8, % xmm6, % xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, % xmm9, % xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, % xmm7, % xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand % xmm3, % xmm9, % xmm9
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor % xmm15, % xmm8, % xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor % xmm9, % xmm7, % xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand % xmm4, % xmm13, % xmm9
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, % xmm14, % xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, % xmm13, % xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand % xmm5, % xmm14, % xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor % xmm15, % xmm9, % xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor % xmm14, % xmm13, % xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand % xmm4, % xmm11, % xmm14
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, % xmm10, % xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, % xmm11, % xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand % xmm5, % xmm10, % xmm10
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor % xmm15, % xmm14, % xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor % xmm10, % xmm11, % xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand % xmm4, % xmm12, % xmm11
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, % xmm8, % xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, % xmm12, % xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand % xmm5, % xmm8, % xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor % xmm15, % xmm11, % xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor % xmm8, % xmm12, % xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 8 ] = buf
# asm 1: movq <buf=int64#2,8(<input_0=int64#1)
# asm 2: movq <buf=%rsi,8(<input_0=%rdi)
movq %rsi, 8(%rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi
# qhasm: mem64[ input_0 + 72 ] = buf
# asm 1: movq <buf=int64#2,72(<input_0=int64#1)
# asm 2: movq <buf=%rsi,72(<input_0=%rdi)
movq %rsi, 72(%rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi
# qhasm: mem64[ input_0 + 136 ] = buf
# asm 1: movq <buf=int64#2,136(<input_0=int64#1)
# asm 2: movq <buf=%rsi,136(<input_0=%rdi)
movq %rsi, 136(%rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi
# qhasm: mem64[ input_0 + 200 ] = buf
# asm 1: movq <buf=int64#2,200(<input_0=int64#1)
# asm 2: movq <buf=%rsi,200(<input_0=%rdi)
movq %rsi, 200(%rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi
# qhasm: mem64[ input_0 + 264 ] = buf
# asm 1: movq <buf=int64#2,264(<input_0=int64#1)
# asm 2: movq <buf=%rsi,264(<input_0=%rdi)
movq %rsi, 264(%rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 328 ] = buf
# asm 1: movq <buf=int64#2,328(<input_0=int64#1)
# asm 2: movq <buf=%rsi,328(<input_0=%rdi)
movq %rsi, 328(%rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi
# qhasm: mem64[ input_0 + 392 ] = buf
# asm 1: movq <buf=int64#2,392(<input_0=int64#1)
# asm 2: movq <buf=%rsi,392(<input_0=%rdi)
movq %rsi, 392(%rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi
# qhasm: mem64[ input_0 + 456 ] = buf
# asm 1: movq <buf=int64#2,456(<input_0=int64#1)
# asm 2: movq <buf=%rsi,456(<input_0=%rdi)
movq %rsi, 456(%rdi)
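# Note: each block below repeats the same pattern -- load eight quadwords
# at a 64-byte stride, exchange halves at 32-, 16-, then 8-bit granularity
# through mask0..mask5, and write the low 64 bits back. Read as a whole,
# this appears to transpose an 8x8 byte matrix held in r0..r7 (an
# annotation inferred from the qhasm comments, not generated output).
# This round covers the quadwords at byte offsets 16, 80, ..., 464.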
# qhasm: r0 = mem64[ input_0 + 16 ] x2
# asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6
movddup 16(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7
movddup 80(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 144 ] x2
# asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8
movddup 144(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 208 ] x2
# asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9
movddup 208(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 272 ] x2
# asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10
movddup 272(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 336 ] x2
# asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11
movddup 336(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 400 ] x2
# asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12
movddup 400(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 464 ] x2
# asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13
movddup 464(%rdi), %xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 16 ] = buf
# asm 1: movq <buf=int64#2,16(<input_0=int64#1)
# asm 2: movq <buf=%rsi,16(<input_0=%rdi)
movq %rsi, 16(%rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi
# qhasm: mem64[ input_0 + 80 ] = buf
# asm 1: movq <buf=int64#2,80(<input_0=int64#1)
# asm 2: movq <buf=%rsi,80(<input_0=%rdi)
movq %rsi, 80(%rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi
# qhasm: mem64[ input_0 + 144 ] = buf
# asm 1: movq <buf=int64#2,144(<input_0=int64#1)
# asm 2: movq <buf=%rsi,144(<input_0=%rdi)
movq %rsi, 144(%rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi
# qhasm: mem64[ input_0 + 208 ] = buf
# asm 1: movq <buf=int64#2,208(<input_0=int64#1)
# asm 2: movq <buf=%rsi,208(<input_0=%rdi)
movq %rsi, 208(%rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi
# qhasm: mem64[ input_0 + 272 ] = buf
# asm 1: movq <buf=int64#2,272(<input_0=int64#1)
# asm 2: movq <buf=%rsi,272(<input_0=%rdi)
movq %rsi, 272(%rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 336 ] = buf
# asm 1: movq <buf=int64#2,336(<input_0=int64#1)
# asm 2: movq <buf=%rsi,336(<input_0=%rdi)
movq %rsi, 336(%rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi
# qhasm: mem64[ input_0 + 400 ] = buf
# asm 1: movq <buf=int64#2,400(<input_0=int64#1)
# asm 2: movq <buf=%rsi,400(<input_0=%rdi)
movq %rsi, 400(%rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi
# qhasm: mem64[ input_0 + 464 ] = buf
# asm 1: movq <buf=int64#2,464(<input_0=int64#1)
# asm 2: movq <buf=%rsi,464(<input_0=%rdi)
movq %rsi, 464(%rdi)
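# Note: same masked-swap round as above, now on the quadwords at byte
# offsets 24, 88, ..., 472.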
# qhasm: r0 = mem64[ input_0 + 24 ] x2
# asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6
movddup 24(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 88 ] x2
# asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7
movddup 88(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 152 ] x2
# asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8
movddup 152(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 216 ] x2
# asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9
movddup 216(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 280 ] x2
# asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10
movddup 280(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 344 ] x2
# asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11
movddup 344(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 408 ] x2
# asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12
movddup 408(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 472 ] x2
# asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13
movddup 472(%rdi), %xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 24 ] = buf
# asm 1: movq <buf=int64#2,24(<input_0=int64#1)
# asm 2: movq <buf=%rsi,24(<input_0=%rdi)
movq %rsi, 24(%rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi
# qhasm: mem64[ input_0 + 88 ] = buf
# asm 1: movq <buf=int64#2,88(<input_0=int64#1)
# asm 2: movq <buf=%rsi,88(<input_0=%rdi)
movq %rsi, 88(%rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi
# qhasm: mem64[ input_0 + 152 ] = buf
# asm 1: movq <buf=int64#2,152(<input_0=int64#1)
# asm 2: movq <buf=%rsi,152(<input_0=%rdi)
movq %rsi, 152(%rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi
# qhasm: mem64[ input_0 + 216 ] = buf
# asm 1: movq <buf=int64#2,216(<input_0=int64#1)
# asm 2: movq <buf=%rsi,216(<input_0=%rdi)
movq %rsi, 216(%rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi
# qhasm: mem64[ input_0 + 280 ] = buf
# asm 1: movq <buf=int64#2,280(<input_0=int64#1)
# asm 2: movq <buf=%rsi,280(<input_0=%rdi)
movq %rsi, 280(%rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 344 ] = buf
# asm 1: movq <buf=int64#2,344(<input_0=int64#1)
# asm 2: movq <buf=%rsi,344(<input_0=%rdi)
movq %rsi, 344(%rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi
# qhasm: mem64[ input_0 + 408 ] = buf
# asm 1: movq <buf=int64#2,408(<input_0=int64#1)
# asm 2: movq <buf=%rsi,408(<input_0=%rdi)
movq %rsi, 408(%rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi
# qhasm: mem64[ input_0 + 472 ] = buf
# asm 1: movq <buf=int64#2,472(<input_0=int64#1)
# asm 2: movq <buf=%rsi,472(<input_0=%rdi)
movq %rsi, 472(%rdi)
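# Note: same masked-swap round as above, now on the quadwords at byte
# offsets 32, 96, ..., 480.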
# qhasm: r0 = mem64[ input_0 + 32 ] x2
# asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6
movddup 32(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 96 ] x2
# asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7
movddup 96(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 160 ] x2
# asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8
movddup 160(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 224 ] x2
# asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9
movddup 224(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 288 ] x2
# asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10
movddup 288(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 352 ] x2
# asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11
movddup 352(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 416 ] x2
# asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12
movddup 416(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 480 ] x2
# asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13
movddup 480(%rdi), %xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8, %xmm14, %xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8, %xmm10, %xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8, %xmm8, %xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8, %xmm7, %xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0, %xmm9, %rsi
# qhasm: mem64[ input_0 + 32 ] = buf
# asm 1: movq <buf=int64#2,32(<input_0=int64#1)
# asm 2: movq <buf=%rsi,32(<input_0=%rdi)
movq %rsi, 32(%rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0, %xmm13, %rsi
# qhasm: mem64[ input_0 + 96 ] = buf
# asm 1: movq <buf=int64#2,96(<input_0=int64#1)
# asm 2: movq <buf=%rsi,96(<input_0=%rdi)
movq %rsi, 96(%rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0, %xmm14, %rsi
# qhasm: mem64[ input_0 + 160 ] = buf
# asm 1: movq <buf=int64#2,160(<input_0=int64#1)
# asm 2: movq <buf=%rsi,160(<input_0=%rdi)
movq %rsi, 160(%rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0, %xmm10, %rsi
# qhasm: mem64[ input_0 + 224 ] = buf
# asm 1: movq <buf=int64#2,224(<input_0=int64#1)
# asm 2: movq <buf=%rsi,224(<input_0=%rdi)
movq %rsi, 224(%rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0, %xmm11, %rsi
# qhasm: mem64[ input_0 + 288 ] = buf
# asm 1: movq <buf=int64#2,288(<input_0=int64#1)
# asm 2: movq <buf=%rsi,288(<input_0=%rdi)
movq %rsi, 288(%rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0, %xmm8, %rsi
# qhasm: mem64[ input_0 + 352 ] = buf
# asm 1: movq <buf=int64#2,352(<input_0=int64#1)
# asm 2: movq <buf=%rsi,352(<input_0=%rdi)
movq %rsi, 352(%rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0, %xmm12, %rsi
# qhasm: mem64[ input_0 + 416 ] = buf
# asm 1: movq <buf=int64#2,416(<input_0=int64#1)
# asm 2: movq <buf=%rsi,416(<input_0=%rdi)
movq %rsi, 416(%rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0, %xmm6, %rsi
# qhasm: mem64[ input_0 + 480 ] = buf
# asm 1: movq <buf=int64#2,480(<input_0=int64#1)
# asm 2: movq <buf=%rsi,480(<input_0=%rdi)
movq %rsi, 480(%rdi)
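# Note: same masked-swap round as above, now on the quadwords at byte
# offsets 40, 104, ..., 488.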
# qhasm: r0 = mem64[ input_0 + 40 ] x2
# asm 1: movddup 40(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 40(<input_0=%rdi),>r0=%xmm6
movddup 40(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 104 ] x2
# asm 1: movddup 104(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 104(<input_0=%rdi),>r1=%xmm7
movddup 104(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 168 ] x2
# asm 1: movddup 168(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 168(<input_0=%rdi),>r2=%xmm8
movddup 168(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 232 ] x2
# asm 1: movddup 232(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 232(<input_0=%rdi),>r3=%xmm9
movddup 232(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 296 ] x2
# asm 1: movddup 296(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 296(<input_0=%rdi),>r4=%xmm10
movddup 296(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 360 ] x2
# asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11
movddup 360(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 424 ] x2
# asm 1: movddup 424(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 424(<input_0=%rdi),>r6=%xmm12
movddup 424(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 488 ] x2
# asm 1: movddup 488(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 488(<input_0=%rdi),>r7=%xmm13
movddup 488(%rdi), %xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32, %xmm10, %xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32, %xmm11, %xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32, %xmm12, %xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32, %xmm13, %xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16, %xmm11, %xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16, %xmm12, %xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16, %xmm8, %xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16, %xmm9, %xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6
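# Write-back phase: the low 64 bits of r0..r7 are extracted with pextrq
# and stored back at a 64-byte stride (offsets 40, 104, ..., 488).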
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi
# qhasm: mem64[ input_0 + 40 ] = buf
# asm 1: movq <buf=int64#2,40(<input_0=int64#1)
# asm 2: movq <buf=%rsi,40(<input_0=%rdi)
movq %rsi,40(%rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi
# qhasm: mem64[ input_0 + 104 ] = buf
# asm 1: movq <buf=int64#2,104(<input_0=int64#1)
# asm 2: movq <buf=%rsi,104(<input_0=%rdi)
movq %rsi,104(%rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi
# qhasm: mem64[ input_0 + 168 ] = buf
# asm 1: movq <buf=int64#2,168(<input_0=int64#1)
# asm 2: movq <buf=%rsi,168(<input_0=%rdi)
movq %rsi,168(%rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi
# qhasm: mem64[ input_0 + 232 ] = buf
# asm 1: movq <buf=int64#2,232(<input_0=int64#1)
# asm 2: movq <buf=%rsi,232(<input_0=%rdi)
movq %rsi,232(%rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi
# qhasm: mem64[ input_0 + 296 ] = buf
# asm 1: movq <buf=int64#2,296(<input_0=int64#1)
# asm 2: movq <buf=%rsi,296(<input_0=%rdi)
movq %rsi,296(%rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi
# qhasm: mem64[ input_0 + 360 ] = buf
# asm 1: movq <buf=int64#2,360(<input_0=int64#1)
# asm 2: movq <buf=%rsi,360(<input_0=%rdi)
movq %rsi,360(%rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi
# qhasm: mem64[ input_0 + 424 ] = buf
# asm 1: movq <buf=int64#2,424(<input_0=int64#1)
# asm 2: movq <buf=%rsi,424(<input_0=%rdi)
movq %rsi,424(%rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi
# qhasm: mem64[ input_0 + 488 ] = buf
# asm 1: movq <buf=int64#2,488(<input_0=int64#1)
# asm 2: movq <buf=%rsi,488(<input_0=%rdi)
movq %rsi,488(%rdi)
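# Next 64-bit column: movddup broadcasts the qwords at offsets 48, 112,
# ..., 496 into both lanes, and the same mask-and-shift network is applied.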
# qhasm: r0 = mem64[ input_0 + 48 ] x2
# asm 1: movddup 48(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 48(<input_0=%rdi),>r0=%xmm6
movddup 48(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 112 ] x2
# asm 1: movddup 112(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 112(<input_0=%rdi),>r1=%xmm7
movddup 112(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 176 ] x2
# asm 1: movddup 176(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 176(<input_0=%rdi),>r2=%xmm8
movddup 176(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 240 ] x2
# asm 1: movddup 240(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 240(<input_0=%rdi),>r3=%xmm9
movddup 240(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 304 ] x2
# asm 1: movddup 304(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 304(<input_0=%rdi),>r4=%xmm10
movddup 304(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 368 ] x2
# asm 1: movddup 368(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 368(<input_0=%rdi),>r5=%xmm11
movddup 368(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 432 ] x2
# asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12
movddup 432(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 496 ] x2
# asm 1: movddup 496(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 496(<input_0=%rdi),>r7=%xmm13
movddup 496(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6
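# Store the transposed qwords for this column back to offsets 48..496.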
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi
# qhasm: mem64[ input_0 + 48 ] = buf
# asm 1: movq <buf=int64#2,48(<input_0=int64#1)
# asm 2: movq <buf=%rsi,48(<input_0=%rdi)
movq %rsi,48(%rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi
# qhasm: mem64[ input_0 + 112 ] = buf
# asm 1: movq <buf=int64#2,112(<input_0=int64#1)
# asm 2: movq <buf=%rsi,112(<input_0=%rdi)
movq %rsi,112(%rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi
# qhasm: mem64[ input_0 + 176 ] = buf
# asm 1: movq <buf=int64#2,176(<input_0=int64#1)
# asm 2: movq <buf=%rsi,176(<input_0=%rdi)
movq %rsi,176(%rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi
# qhasm: mem64[ input_0 + 240 ] = buf
# asm 1: movq <buf=int64#2,240(<input_0=int64#1)
# asm 2: movq <buf=%rsi,240(<input_0=%rdi)
movq %rsi,240(%rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi
# qhasm: mem64[ input_0 + 304 ] = buf
# asm 1: movq <buf=int64#2,304(<input_0=int64#1)
# asm 2: movq <buf=%rsi,304(<input_0=%rdi)
movq %rsi,304(%rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi
# qhasm: mem64[ input_0 + 368 ] = buf
# asm 1: movq <buf=int64#2,368(<input_0=int64#1)
# asm 2: movq <buf=%rsi,368(<input_0=%rdi)
movq %rsi,368(%rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi
# qhasm: mem64[ input_0 + 432 ] = buf
# asm 1: movq <buf=int64#2,432(<input_0=int64#1)
# asm 2: movq <buf=%rsi,432(<input_0=%rdi)
movq %rsi,432(%rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi
# qhasm: mem64[ input_0 + 496 ] = buf
# asm 1: movq <buf=int64#2,496(<input_0=int64#1)
# asm 2: movq <buf=%rsi,496(<input_0=%rdi)
movq %rsi,496(%rdi)
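# Last column of this pass: rows reloaded from offsets 56, 120, ..., 504.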
# qhasm: r0 = mem64[ input_0 + 56 ] x2
# asm 1: movddup 56(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 56(<input_0=%rdi),>r0=%xmm6
movddup 56(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 120 ] x2
# asm 1: movddup 120(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 120(<input_0=%rdi),>r1=%xmm7
movddup 120(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 184 ] x2
# asm 1: movddup 184(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 184(<input_0=%rdi),>r2=%xmm8
movddup 184(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 248 ] x2
# asm 1: movddup 248(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 248(<input_0=%rdi),>r3=%xmm9
movddup 248(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 312 ] x2
# asm 1: movddup 312(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 312(<input_0=%rdi),>r4=%xmm10
movddup 312(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 376 ] x2
# asm 1: movddup 376(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 376(<input_0=%rdi),>r5=%xmm11
movddup 376(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 440 ] x2
# asm 1: movddup 440(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 440(<input_0=%rdi),>r6=%xmm12
movddup 440(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 504 ] x2
# asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13
movddup 504(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0
vpand %xmm0,%xmm9,%xmm0
# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12
vpsllq $32,%xmm13,%xmm12
# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1
vpand %xmm1,%xmm13,%xmm1
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1
# asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0
vpor %xmm12,%xmm0,%xmm0
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1
vpor %xmm1,%xmm9,%xmm1
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9
vpand %xmm2,%xmm14,%xmm9
# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12
vpslld $16,%xmm11,%xmm12
# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13
vpsrld $16,%xmm14,%xmm13
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9
vpor %xmm12,%xmm9,%xmm9
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11
vpor %xmm11,%xmm13,%xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12
vpand %xmm2,%xmm10,%xmm12
# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14
# asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13
vpslld $16,%xmm0,%xmm13
# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0
vpand %xmm3,%xmm0,%xmm0
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12
vpor %xmm13,%xmm12,%xmm12
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0
vpor %xmm0,%xmm10,%xmm0
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10
vpand %xmm2,%xmm6,%xmm10
# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13
vpslld $16,%xmm8,%xmm13
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10
vpor %xmm13,%xmm10,%xmm10
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2
vpand %xmm2,%xmm7,%xmm2
# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9
# asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8
vpslld $16,%xmm1,%xmm8
# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1
vpand %xmm3,%xmm1,%xmm1
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#9,<v00=reg128#3,>r5=reg128#3
# asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2
vpor %xmm8,%xmm2,%xmm2
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1
vpor %xmm1,%xmm7,%xmm1
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3
vpand %xmm4,%xmm9,%xmm3
# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8
# asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7
vpsllw $8,%xmm12,%xmm7
# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9
# asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8
vpsrlw $8,%xmm9,%xmm8
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9
vpand %xmm5,%xmm12,%xmm9
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3
vpor %xmm7,%xmm3,%xmm3
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7
vpor %xmm9,%xmm8,%xmm7
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8
vpand %xmm4,%xmm11,%xmm8
# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10
# asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9
vpsllw $8,%xmm0,%xmm9
# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0
vpand %xmm5,%xmm0,%xmm0
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8
vpor %xmm9,%xmm8,%xmm8
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0
vpor %xmm0,%xmm11,%xmm0
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9
vpand %xmm4,%xmm10,%xmm9
# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12
# asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11
vpsllw $8,%xmm2,%xmm11
# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11
# asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10
vpsrlw $8,%xmm10,%xmm10
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2
vpand %xmm5,%xmm2,%xmm2
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9
vpor %xmm11,%xmm9,%xmm9
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2
vpor %xmm2,%xmm10,%xmm2
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4
vpand %xmm4,%xmm6,%xmm4
# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11
# asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10
vpsllw $8,%xmm1,%xmm10
# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1
vpand %xmm5,%xmm1,%xmm1
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5
# asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4
vpor %xmm10,%xmm4,%xmm4
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1
vpor %xmm1,%xmm6,%xmm1
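# Write-back for the final column (offsets 56..504).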
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi
pextrq $0x0,%xmm3,%rsi
# qhasm: mem64[ input_0 + 56 ] = buf
# asm 1: movq <buf=int64#2,56(<input_0=int64#1)
# asm 2: movq <buf=%rsi,56(<input_0=%rdi)
movq %rsi,56(%rdi)
# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi
pextrq $0x0,%xmm7,%rsi
# qhasm: mem64[ input_0 + 120 ] = buf
# asm 1: movq <buf=int64#2,120(<input_0=int64#1)
# asm 2: movq <buf=%rsi,120(<input_0=%rdi)
movq %rsi,120(%rdi)
# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi
# qhasm: mem64[ input_0 + 184 ] = buf
# asm 1: movq <buf=int64#2,184(<input_0=int64#1)
# asm 2: movq <buf=%rsi,184(<input_0=%rdi)
movq %rsi,184(%rdi)
# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi
pextrq $0x0,%xmm0,%rsi
# qhasm: mem64[ input_0 + 248 ] = buf
# asm 1: movq <buf=int64#2,248(<input_0=int64#1)
# asm 2: movq <buf=%rsi,248(<input_0=%rdi)
movq %rsi,248(%rdi)
# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi
# qhasm: mem64[ input_0 + 312 ] = buf
# asm 1: movq <buf=int64#2,312(<input_0=int64#1)
# asm 2: movq <buf=%rsi,312(<input_0=%rdi)
movq %rsi,312(%rdi)
# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi
pextrq $0x0,%xmm2,%rsi
# qhasm: mem64[ input_0 + 376 ] = buf
# asm 1: movq <buf=int64#2,376(<input_0=int64#1)
# asm 2: movq <buf=%rsi,376(<input_0=%rdi)
movq %rsi,376(%rdi)
# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi
pextrq $0x0,%xmm4,%rsi
# qhasm: mem64[ input_0 + 440 ] = buf
# asm 1: movq <buf=int64#2,440(<input_0=int64#1)
# asm 2: movq <buf=%rsi,440(<input_0=%rdi)
movq %rsi,440(%rdi)
# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi
pextrq $0x0,%xmm1,%rsi
# qhasm: mem64[ input_0 + 504 ] = buf
# asm 1: movq <buf=int64#2,504(<input_0=int64#1)
# asm 2: movq <buf=%rsi,504(<input_0=%rdi)
movq %rsi,504(%rdi)
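# Second pass: the mask registers are reloaded in the opposite order
# (MASK2_*, MASK1_*, MASK0_*). Assuming the usual qhasm-generated bit-matrix
# transpose, the swaps below operate at 4-, 2- and 1-bit granularity
# (psllq/psrlq with shift counts 4, 2 and 1) within each 64-bit word.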
# qhasm: mask0 aligned= mem128[ MASK2_0 ]
# asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0
movdqa MASK2_0(%rip),%xmm0
# qhasm: mask1 aligned= mem128[ MASK2_1 ]
# asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1
movdqa MASK2_1(%rip),%xmm1
# qhasm: mask2 aligned= mem128[ MASK1_0 ]
# asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2
movdqa MASK1_0(%rip),%xmm2
# qhasm: mask3 aligned= mem128[ MASK1_1 ]
# asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3
movdqa MASK1_1(%rip),%xmm3
# qhasm: mask4 aligned= mem128[ MASK0_0 ]
# asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4
movdqa MASK0_0(%rip),%xmm4
# qhasm: mask5 aligned= mem128[ MASK0_1 ]
# asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5
movdqa MASK0_1(%rip),%xmm5
# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 8 ] x2
# asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7
movddup 8(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 16 ] x2
# asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8
movddup 16(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 24 ] x2
# asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9
movddup 24(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 32 ] x2
# asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10
movddup 32(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 40 ] x2
# asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11
movddup 40(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 48 ] x2
# asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12
movddup 48(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 56 ] x2
# asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13
movddup 56(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6
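# Recombine results: vpunpcklqdq packs the low qwords of r0/r1, r2/r3,
# r4/r5 and r6/r7 into 128-bit vectors stored at offsets 0, 16, 32, 48.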
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 0 ] = t0
# asm 1: movdqu <t0=reg128#8,0(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,0(<input_0=%rdi)
movdqu %xmm7,0(%rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 16 ] = t0
# asm 1: movdqu <t0=reg128#8,16(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,16(<input_0=%rdi)
movdqu %xmm7,16(%rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 32 ] = t0
# asm 1: movdqu <t0=reg128#8,32(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,32(<input_0=%rdi)
movdqu %xmm7,32(%rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 48 ] = t0
# asm 1: movdqu <t0=reg128#7,48(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,48(<input_0=%rdi)
movdqu %xmm6,48(%rdi)
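# Next group of rows: reload r0..r7 from offsets 64..120 and repeat the
# same fine-grained swap network.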
# qhasm: r0 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6
movddup 64(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8
movddup 80(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 88 ] x2
# asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9
movddup 88(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 96 ] x2
# asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10
movddup 96(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 104 ] x2
# asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11
movddup 104(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 112 ] x2
# asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12
movddup 112(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 120 ] x2
# asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13
movddup 120(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13, %xmm9, %xmm7
# qhasm: mem128[ input_0 + 64 ] = t0
# asm 1: movdqu <t0=reg128#8,64(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,64(<input_0=%rdi)
movdqu %xmm7, 64(%rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10, %xmm14, %xmm7
# qhasm: mem128[ input_0 + 80 ] = t0
# asm 1: movdqu <t0=reg128#8,80(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,80(<input_0=%rdi)
movdqu %xmm7, 80(%rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8, %xmm11, %xmm7
# qhasm: mem128[ input_0 + 96 ] = t0
# asm 1: movdqu <t0=reg128#8,96(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,96(<input_0=%rdi)
movdqu %xmm7, 96(%rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6, %xmm12, %xmm6
# qhasm: mem128[ input_0 + 112 ] = t0
# asm 1: movdqu <t0=reg128#7,112(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,112(<input_0=%rdi)
movdqu %xmm6, 112(%rdi)
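# Same interleave rounds (distances 4, 2, 1) for the block at bytes 128..191.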
# qhasm: r0 = mem64[ input_0 + 128 ] x2
# asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6
movddup 128(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 136 ] x2
# asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7
movddup 136(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 144 ] x2
# asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8
movddup 144(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 152 ] x2
# asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9
movddup 152(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 160 ] x2
# asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10
movddup 160(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 168 ] x2
# asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11
movddup 168(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 176 ] x2
# asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12
movddup 176(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 184 ] x2
# asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13
movddup 184(%rdi), %xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13, %xmm9, %xmm7
# qhasm: mem128[ input_0 + 128 ] = t0
# asm 1: movdqu <t0=reg128#8,128(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,128(<input_0=%rdi)
movdqu %xmm7, 128(%rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10, %xmm14, %xmm7
# qhasm: mem128[ input_0 + 144 ] = t0
# asm 1: movdqu <t0=reg128#8,144(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,144(<input_0=%rdi)
movdqu %xmm7, 144(%rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8, %xmm11, %xmm7
# qhasm: mem128[ input_0 + 160 ] = t0
# asm 1: movdqu <t0=reg128#8,160(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,160(<input_0=%rdi)
movdqu %xmm7, 160(%rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6, %xmm12, %xmm6
# qhasm: mem128[ input_0 + 176 ] = t0
# asm 1: movdqu <t0=reg128#7,176(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,176(<input_0=%rdi)
movdqu %xmm6, 176(%rdi)
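# Same interleave rounds (distances 4, 2, 1) for the block at bytes 192..255.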
# qhasm: r0 = mem64[ input_0 + 192 ] x2
# asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6
movddup 192(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 200 ] x2
# asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7
movddup 200(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 208 ] x2
# asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8
movddup 208(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 216 ] x2
# asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9
movddup 216(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 224 ] x2
# asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10
movddup 224(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 232 ] x2
# asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11
movddup 232(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 240 ] x2
# asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12
movddup 240(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 248 ] x2
# asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13
movddup 248(%rdi), %xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13, %xmm9, %xmm7
# qhasm: mem128[ input_0 + 192 ] = t0
# asm 1: movdqu <t0=reg128#8,192(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,192(<input_0=%rdi)
movdqu %xmm7, 192(%rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10, %xmm14, %xmm7
# qhasm: mem128[ input_0 + 208 ] = t0
# asm 1: movdqu <t0=reg128#8,208(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,208(<input_0=%rdi)
movdqu %xmm7, 208(%rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8, %xmm11, %xmm7
# qhasm: mem128[ input_0 + 224 ] = t0
# asm 1: movdqu <t0=reg128#8,224(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,224(<input_0=%rdi)
movdqu %xmm7, 224(%rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6, %xmm12, %xmm6
# qhasm: mem128[ input_0 + 240 ] = t0
# asm 1: movdqu <t0=reg128#7,240(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,240(<input_0=%rdi)
movdqu %xmm6, 240(%rdi)
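# Same interleave rounds (distances 4, 2, 1) for the block at bytes 256..319.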
# qhasm: r0 = mem64[ input_0 + 256 ] x2
# asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6
movddup 256(%rdi), %xmm6
# qhasm: r1 = mem64[ input_0 + 264 ] x2
# asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7
movddup 264(%rdi), %xmm7
# qhasm: r2 = mem64[ input_0 + 272 ] x2
# asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8
movddup 272(%rdi), %xmm8
# qhasm: r3 = mem64[ input_0 + 280 ] x2
# asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9
movddup 280(%rdi), %xmm9
# qhasm: r4 = mem64[ input_0 + 288 ] x2
# asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10
movddup 288(%rdi), %xmm10
# qhasm: r5 = mem64[ input_0 + 296 ] x2
# asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11
movddup 296(%rdi), %xmm11
# qhasm: r6 = mem64[ input_0 + 304 ] x2
# asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12
movddup 304(%rdi), %xmm12
# qhasm: r7 = mem64[ input_0 + 312 ] x2
# asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13
movddup 312(%rdi), %xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7
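# End of the mask2/mask3 (shift-2) layer: pairs (r0,r2),(r1,r3),(r4,r6),
# (r5,r7) have exchanged their 2-bit sub-groups.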
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6
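# End of the mask4/mask5 (shift-1) layer; the eight rows of this block are
# fully interleaved and are now paired up for the 128-bit stores below.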
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 256 ] = t0
# asm 1: movdqu <t0=reg128#8,256(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,256(<input_0=%rdi)
movdqu %xmm7,256(%rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 272 ] = t0
# asm 1: movdqu <t0=reg128#8,272(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,272(<input_0=%rdi)
movdqu %xmm7,272(%rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 288 ] = t0
# asm 1: movdqu <t0=reg128#8,288(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,288(<input_0=%rdi)
movdqu %xmm7,288(%rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 304 ] = t0
# asm 1: movdqu <t0=reg128#7,304(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,304(<input_0=%rdi)
movdqu %xmm6,304(%rdi)
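# One full 8-row unit is now complete: eight 64-bit rows were broadcast in
# with movddup, run through three mask-and-shift swap layers (shifts 4, 2
# and 1), and written back two rows per 128-bit store via vpunpcklqdq.
# The sketch below restates one such unit in plain C. It is a reference
# sketch only, not part of the generated source: the mask constants are
# assumed from the shift counts (the real ones are loaded from memory
# earlier in the stream), and the helper names are invented for clarity.
/*
#include <stdint.h>

// Exchange the high s-bit half of each 2*s-bit group in *a with the low
// half of the corresponding group in *b (mlo/mhi select the two halves).
static void swap_layer(uint64_t *a, uint64_t *b,
                       uint64_t mlo, uint64_t mhi, unsigned s)
{
    uint64_t a2 = (*a & mlo) | ((*b & mlo) << s);   // vpand/psllq/vpor
    uint64_t b2 = ((*a & mhi) >> s) | (*b & mhi);   // vpand/psrlq/vpor
    *a = a2;
    *b = b2;
}

// Assumed per-block round, mirroring the register pairs used above.
static void transpose8_rows(uint64_t r[8])
{
    for (int i = 0; i < 4; i++)            // shift 4: (r0,r4)..(r3,r7)
        swap_layer(&r[i], &r[i + 4],
                   0x0f0f0f0f0f0f0f0fULL, 0xf0f0f0f0f0f0f0f0ULL, 4);
    for (int i = 0; i < 8; i++)            // shift 2: (r0,r2),(r1,r3),...
        if (!(i & 2))
            swap_layer(&r[i], &r[i + 2],
                       0x3333333333333333ULL, 0xccccccccccccccccULL, 2);
    for (int i = 0; i < 8; i += 2)         // shift 1: (r0,r1),(r2,r3),...
        swap_layer(&r[i], &r[i + 1],
                   0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL, 1);
}
*/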
# qhasm: r0 = mem64[ input_0 + 320 ] x2
# asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6
movddup 320(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 328 ] x2
# asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7
movddup 328(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 336 ] x2
# asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8
movddup 336(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 344 ] x2
# asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9
movddup 344(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 352 ] x2
# asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10
movddup 352(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 360 ] x2
# asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11
movddup 360(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 368 ] x2
# asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12
movddup 368(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 376 ] x2
# asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13
movddup 376(%rdi),%xmm13
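# movddup broadcasts each 64-bit row into both qword lanes of the xmm
# register; both lanes carry the same data, which is why only the low
# qwords are recombined (vpunpcklqdq) when the results are stored.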
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 320 ] = t0
# asm 1: movdqu <t0=reg128#8,320(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,320(<input_0=%rdi)
movdqu %xmm7,320(%rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 336 ] = t0
# asm 1: movdqu <t0=reg128#8,336(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,336(<input_0=%rdi)
movdqu %xmm7,336(%rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 352 ] = t0
# asm 1: movdqu <t0=reg128#8,352(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,352(<input_0=%rdi)
movdqu %xmm7,352(%rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 368 ] = t0
# asm 1: movdqu <t0=reg128#7,368(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,368(<input_0=%rdi)
movdqu %xmm6,368(%rdi)
# qhasm: r0 = mem64[ input_0 + 384 ] x2
# asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6
movddup 384(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 392 ] x2
# asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7
movddup 392(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 400 ] x2
# asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8
movddup 400(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 408 ] x2
# asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 408(<input_0=%rdi),>r3=%xmm9
movddup 408(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 416 ] x2
# asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10
movddup 416(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 424 ] x2
# asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11
movddup 424(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 432 ] x2
# asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12
movddup 432(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 440 ] x2
# asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13
movddup 440(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7
# qhasm: mem128[ input_0 + 384 ] = t0
# asm 1: movdqu <t0=reg128#8,384(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,384(<input_0=%rdi)
movdqu %xmm7,384(%rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7
# qhasm: mem128[ input_0 + 400 ] = t0
# asm 1: movdqu <t0=reg128#8,400(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,400(<input_0=%rdi)
movdqu %xmm7,400(%rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7
# qhasm: mem128[ input_0 + 416 ] = t0
# asm 1: movdqu <t0=reg128#8,416(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm7,416(<input_0=%rdi)
movdqu %xmm7,416(%rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6
# qhasm: mem128[ input_0 + 432 ] = t0
# asm 1: movdqu <t0=reg128#7,432(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm6,432(<input_0=%rdi)
movdqu %xmm6,432(%rdi)
# qhasm: r0 = mem64[ input_0 + 448 ] x2
# asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6
movddup 448(%rdi),%xmm6
# qhasm: r1 = mem64[ input_0 + 456 ] x2
# asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7
movddup 456(%rdi),%xmm7
# qhasm: r2 = mem64[ input_0 + 464 ] x2
# asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8
movddup 464(%rdi),%xmm8
# qhasm: r3 = mem64[ input_0 + 472 ] x2
# asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9
movddup 472(%rdi),%xmm9
# qhasm: r4 = mem64[ input_0 + 480 ] x2
# asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10
movddup 480(%rdi),%xmm10
# qhasm: r5 = mem64[ input_0 + 488 ] x2
# asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11
movddup 488(%rdi),%xmm11
# qhasm: r6 = mem64[ input_0 + 496 ] x2
# asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12
movddup 496(%rdi),%xmm12
# qhasm: r7 = mem64[ input_0 + 504 ] x2
# asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13
movddup 504(%rdi),%xmm13
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7
# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8
# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0
vpand %xmm0,%xmm13,%xmm0
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#1
# asm 2: psllq $4,<v10=%xmm0
psllq $4,%xmm0
# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9
# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1
vpand %xmm1,%xmm13,%xmm1
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9
# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1
# asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0
vpor %xmm0,%xmm12,%xmm0
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1
vpor %xmm1,%xmm9,%xmm1
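# From here on qhasm's register allocator recycles the mask registers:
# %xmm0/%xmm1 (mask0/mask1) were just overwritten as outputs, and
# %xmm2-%xmm5 are consumed the same way below, since this is the last
# 8-row block and the masks are no longer needed.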
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9
vpand %xmm2,%xmm14,%xmm9
# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12
vpand %xmm2,%xmm11,%xmm12
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#13
# asm 2: psllq $2,<v10=%xmm12
psllq $2,%xmm12
# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13
vpand %xmm3,%xmm14,%xmm13
# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#14
# asm 2: psrlq $2,<v01=%xmm13
psrlq $2,%xmm13
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9
vpor %xmm12,%xmm9,%xmm9
# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11
vpor %xmm11,%xmm13,%xmm11
# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12
vpand %xmm2,%xmm10,%xmm12
# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13
vpand %xmm2,%xmm0,%xmm13
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2,%xmm13
# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10
# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0
vpand %xmm3,%xmm0,%xmm0
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10
# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12
vpor %xmm13,%xmm12,%xmm12
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0
vpor %xmm0,%xmm10,%xmm0
# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10
vpand %xmm2,%xmm6,%xmm10
# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13
vpand %xmm2,%xmm8,%xmm13
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2,%xmm13
# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6
# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10
vpor %xmm13,%xmm10,%xmm10
# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3
# asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2
vpand %xmm2,%xmm1,%xmm2
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#3
# asm 2: psllq $2,<v10=%xmm2
psllq $2,%xmm2
# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7
# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1
vpand %xmm3,%xmm1,%xmm1
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7
# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3
# asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2
vpor %xmm2,%xmm8,%xmm2
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1
vpor %xmm1,%xmm7,%xmm1
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3
vpand %xmm4,%xmm9,%xmm3
# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8
# asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7
vpand %xmm4,%xmm12,%xmm7
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#8
# asm 2: psllq $1,<v10=%xmm7
psllq $1,%xmm7
# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9
# asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8
vpand %xmm5,%xmm9,%xmm8
# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9
vpand %xmm5,%xmm12,%xmm9
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#9
# asm 2: psrlq $1,<v01=%xmm8
psrlq $1,%xmm8
# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3
vpor %xmm7,%xmm3,%xmm3
# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7
vpor %xmm9,%xmm8,%xmm7
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8
vpand %xmm4,%xmm11,%xmm8
# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10
# asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9
vpand %xmm4,%xmm0,%xmm9
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#10
# asm 2: psllq $1,<v10=%xmm9
psllq $1,%xmm9
# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11
# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0
vpand %xmm5,%xmm0,%xmm0
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11
# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8
vpor %xmm9,%xmm8,%xmm8
# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0
vpor %xmm0,%xmm11,%xmm0
# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9
vpand %xmm4,%xmm10,%xmm9
# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12
# asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11
vpand %xmm4,%xmm2,%xmm11
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#12
# asm 2: psllq $1,<v10=%xmm11
psllq $1,%xmm11
# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11
# asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2
vpand %xmm5,%xmm2,%xmm2
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#11
# asm 2: psrlq $1,<v01=%xmm10
psrlq $1,%xmm10
# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9
vpor %xmm11,%xmm9,%xmm9
# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2
vpor %xmm2,%xmm10,%xmm2
# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10
vpand %xmm4,%xmm6,%xmm10
# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5
# asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4
vpand % xmm4, % xmm1, % xmm4
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#5
# asm 2: psllq $1,<v10=%xmm4
psllq $1, % xmm4
# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6
# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1
vpand % xmm5, % xmm1, % xmm1
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6
# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5
# asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4
vpor % xmm4, % xmm10, % xmm4
# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1
vpor % xmm1, % xmm6, % xmm1
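# Note: each v00/v10/v01/v11 round above is one masked-swap step of a
# bitwise transpose: with the 1-bit shifts, mask4/mask5 are the even/odd
# bit pair (0x5555.../0xAAAA...), and the shift exchanges the selected
# bits between the two rows. A minimal C sketch of the same step
# (illustrative only, not part of the generated qhasm output):
#
#   void swap_bits_step(uint64_t *r0, uint64_t *r1) {
#       const uint64_t even = 0x5555555555555555ULL, odd = ~even;
#       uint64_t t0 = (*r0 & even) | ((*r1 & even) << 1); /* r0 = v00|v10 */
#       uint64_t t1 = ((*r0 & odd) >> 1) | (*r1 & odd);   /* r1 = v01|v11 */
#       *r0 = t0; *r1 = t1;
#   }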
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4
# asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3
vpunpcklqdq % xmm7, % xmm3, % xmm3
# qhasm: mem128[ input_0 + 448 ] = t0
# asm 1: movdqu <t0=reg128#4,448(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm3,448(<input_0=%rdi)
movdqu % xmm3, 448( % rdi)
# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1
# asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0
vpunpcklqdq % xmm0, % xmm8, % xmm0
# qhasm: mem128[ input_0 + 464 ] = t0
# asm 1: movdqu <t0=reg128#1,464(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm0,464(<input_0=%rdi)
movdqu % xmm0, 464( % rdi)
# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1
# asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0
vpunpcklqdq % xmm2, % xmm9, % xmm0
# qhasm: mem128[ input_0 + 480 ] = t0
# asm 1: movdqu <t0=reg128#1,480(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm0,480(<input_0=%rdi)
movdqu % xmm0, 480( % rdi)
# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1
# asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0
vpunpcklqdq % xmm1, % xmm4, % xmm0
# qhasm: mem128[ input_0 + 496 ] = t0
# asm 1: movdqu <t0=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <t0=%xmm0,496(<input_0=%rdi)
movdqu % xmm0, 496( % rdi)
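# The four vpunpcklqdq/movdqu pairs above gather the low 64-bit lane of
# each result pair (r0,r1), (r2,r3), (r4,r5), (r6,r7) into one 128-bit
# value and store the transposed rows back at offsets 448..496.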
# qhasm: return
add % r11, % rsp
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/consts.S
#include "namespace.h"
#if defined(__APPLE__)
#define ASM_HIDDEN .private_extern
#else
#define ASM_HIDDEN .hidden
#endif
#define MASK0_0 CRYPTO_NAMESPACE(MASK0_0)
#define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0)
#define MASK0_1 CRYPTO_NAMESPACE(MASK0_1)
#define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1)
#define MASK1_0 CRYPTO_NAMESPACE(MASK1_0)
#define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0)
#define MASK1_1 CRYPTO_NAMESPACE(MASK1_1)
#define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1)
#define MASK2_0 CRYPTO_NAMESPACE(MASK2_0)
#define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0)
#define MASK2_1 CRYPTO_NAMESPACE(MASK2_1)
#define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1)
#define MASK3_0 CRYPTO_NAMESPACE(MASK3_0)
#define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0)
#define MASK3_1 CRYPTO_NAMESPACE(MASK3_1)
#define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1)
#define MASK4_0 CRYPTO_NAMESPACE(MASK4_0)
#define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0)
#define MASK4_1 CRYPTO_NAMESPACE(MASK4_1)
#define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1)
#define MASK5_0 CRYPTO_NAMESPACE(MASK5_0)
#define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0)
#define MASK5_1 CRYPTO_NAMESPACE(MASK5_1)
#define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1)
.data
ASM_HIDDEN MASK0_0
ASM_HIDDEN MASK0_1
ASM_HIDDEN MASK1_0
ASM_HIDDEN MASK1_1
ASM_HIDDEN MASK2_0
ASM_HIDDEN MASK2_1
ASM_HIDDEN MASK3_0
ASM_HIDDEN MASK3_1
ASM_HIDDEN MASK4_0
ASM_HIDDEN MASK4_1
ASM_HIDDEN MASK5_0
ASM_HIDDEN MASK5_1
.globl MASK0_0
.globl MASK0_1
.globl MASK1_0
.globl MASK1_1
.globl MASK2_0
.globl MASK2_1
.globl MASK3_0
.globl MASK3_1
.globl MASK4_0
.globl MASK4_1
.globl MASK5_0
.globl MASK5_1
.p2align 5
MASK0_0:
.quad 0x5555555555555555, 0x5555555555555555, 0x5555555555555555, 0x5555555555555555
MASK0_1:
.quad 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA
MASK1_0:
.quad 0x3333333333333333, 0x3333333333333333, 0x3333333333333333, 0x3333333333333333
MASK1_1:
.quad 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC
MASK2_0:
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
MASK2_1:
.quad 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0
MASK3_0:
.quad 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF, 0x00FF00FF00FF00FF
MASK3_1:
.quad 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00
MASK4_0:
.quad 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF
MASK4_1:
.quad 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000
MASK5_0:
.quad 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF, 0x00000000FFFFFFFF
MASK5_1:
.quad 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000, 0xFFFFFFFF00000000
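# These are the standard divide-and-conquer transpose masks: MASKk_0
# keeps the low half of every 2^(k+1)-bit block and MASKk_1 the high
# half. A C sketch that reproduces the table (illustrative only):
#
#   uint64_t mask_lo(int k) {            /* MASKk_0 pattern */
#       uint64_t m = 0;
#       for (int i = 0; i < 64; i++)
#           if (((i >> k) & 1) == 0)     /* bit sits in a low half */
#               m |= 1ULL << i;
#       return m;                        /* MASKk_1 is ~mask_lo(k) */
#   }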
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/update_asm.S
#include "namespace.h"
#define update_asm CRYPTO_NAMESPACE(update_asm)
#define _update_asm _CRYPTO_NAMESPACE(update_asm)
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: int64 s0
# qhasm: int64 s1
# qhasm: int64 s2
# qhasm: enter update_asm
.p2align 5
.global _update_asm
.global update_asm
_update_asm:
update_asm:
mov % rsp, % r11
and $31, % r11
add $0, % r11
sub % r11, % rsp
# qhasm: s2 = input_1
# asm 1: mov <input_1=int64#2,>s2=int64#2
# asm 2: mov <input_1=%rsi,>s2=%rsi
mov % rsi, % rsi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
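# One row update: the 192-bit window {s0,s1,s2} is shifted right by one
# bit via the shrd chain and the low 128 bits are written back. In C,
# a sketch of the same operation:
#
#   s0 = (s0 >> 1) | (s1 << 63);   /* shrd $1, s1, s0 */
#   s1 = (s1 >> 1) | (s2 << 63);   /* shrd $1, s2, s1 */
#   s2 >>= 1;                      /* shr  $1, s2     */
#
# The same body is repeated once per row below, with input_0 advanced by
# the row stride input_2 between iterations; s2 (loaded once from
# input_1) keeps shifting, so each row receives successive bits of it.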
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: s0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>s0=int64#4
# asm 2: movq 0(<input_0=%rdi),>s0=%rcx
movq 0( % rdi), % rcx
# qhasm: s1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>s1=int64#5
# asm 2: movq 8(<input_0=%rdi),>s1=%r8
movq 8( % rdi), % r8
# qhasm: s0 = (s1 s0) >> 1
# asm 1: shrd $1,<s1=int64#5,<s0=int64#4
# asm 2: shrd $1,<s1=%r8,<s0=%rcx
shrd $1, % r8, % rcx
# qhasm: s1 = (s2 s1) >> 1
# asm 1: shrd $1,<s2=int64#2,<s1=int64#5
# asm 2: shrd $1,<s2=%rsi,<s1=%r8
shrd $1, % rsi, % r8
# qhasm: (uint64) s2 >>= 1
# asm 1: shr $1,<s2=int64#2
# asm 2: shr $1,<s2=%rsi
shr $1, % rsi
# qhasm: mem64[ input_0 + 0 ] = s0
# asm 1: movq <s0=int64#4,0(<input_0=int64#1)
# asm 2: movq <s0=%rcx,0(<input_0=%rdi)
movq % rcx, 0( % rdi)
# qhasm: mem64[ input_0 + 8 ] = s1
# asm 1: movq <s1=int64#5,8(<input_0=int64#1)
# asm 2: movq <s1=%r8,8(<input_0=%rdi)
movq % r8, 8( % rdi)
# qhasm: input_0 += input_2
# asm 1: add <input_2=int64#3,<input_0=int64#1
# asm 2: add <input_2=%rdx,<input_0=%rdi
add % rdx, % rdi
# qhasm: return
add % r11, % rsp
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/vec128_mul_asm.S
#include "namespace.h"
#define vec128_mul_asm CRYPTO_NAMESPACE(vec128_mul_asm)
#define _vec128_mul_asm _CRYPTO_NAMESPACE(vec128_mul_asm)
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg256 b0
# qhasm: reg256 b1
# qhasm: reg256 b2
# qhasm: reg256 b3
# qhasm: reg256 b4
# qhasm: reg256 b5
# qhasm: reg256 b6
# qhasm: reg256 b7
# qhasm: reg256 b8
# qhasm: reg256 b9
# qhasm: reg256 b10
# qhasm: reg256 b11
# qhasm: reg256 b12
# qhasm: reg256 a0
# qhasm: reg256 a1
# qhasm: reg256 a2
# qhasm: reg256 a3
# qhasm: reg256 a4
# qhasm: reg256 a5
# qhasm: reg256 a6
# qhasm: reg256 r0
# qhasm: reg256 r1
# qhasm: reg256 r2
# qhasm: reg256 r3
# qhasm: reg256 r4
# qhasm: reg256 r5
# qhasm: reg256 r6
# qhasm: reg256 r7
# qhasm: reg256 r8
# qhasm: reg256 r9
# qhasm: reg256 r10
# qhasm: reg256 r11
# qhasm: reg256 r12
# qhasm: reg256 r13
# qhasm: reg256 r14
# qhasm: reg256 r15
# qhasm: reg256 r16
# qhasm: reg256 r17
# qhasm: reg256 r18
# qhasm: reg256 r19
# qhasm: reg256 r20
# qhasm: reg256 r21
# qhasm: reg256 r22
# qhasm: reg256 r23
# qhasm: reg256 r24
# qhasm: reg256 r
# qhasm: reg128 h0
# qhasm: reg128 h1
# qhasm: reg128 h2
# qhasm: reg128 h3
# qhasm: reg128 h4
# qhasm: reg128 h5
# qhasm: reg128 h6
# qhasm: reg128 h7
# qhasm: reg128 h8
# qhasm: reg128 h9
# qhasm: reg128 h10
# qhasm: reg128 h11
# qhasm: reg128 h12
# qhasm: reg128 h13
# qhasm: reg128 h14
# qhasm: reg128 h15
# qhasm: reg128 h16
# qhasm: reg128 h17
# qhasm: reg128 h18
# qhasm: reg128 h19
# qhasm: reg128 h20
# qhasm: reg128 h21
# qhasm: reg128 h22
# qhasm: reg128 h23
# qhasm: reg128 h24
# qhasm: stack4864 buf
# qhasm: int64 ptr
# qhasm: int64 tmp
# qhasm: enter vec128_mul_asm
.p2align 5
.global _vec128_mul_asm
.global vec128_mul_asm
_vec128_mul_asm:
vec128_mul_asm:
mov % rsp, % r11
and $31, % r11
add $608, % r11
sub % r11, % rsp
# qhasm: ptr = &buf
# asm 1: leaq <buf=stack4864#1,>ptr=int64#5
# asm 2: leaq <buf=0(%rsp),>ptr=%r8
leaq 0( % rsp), % r8
# qhasm: tmp = input_3
# asm 1: mov <input_3=int64#4,>tmp=int64#6
# asm 2: mov <input_3=%rcx,>tmp=%r9
mov % rcx, % r9
# qhasm: tmp *= 12
# asm 1: imulq $12,<tmp=int64#6,>tmp=int64#6
# asm 2: imulq $12,<tmp=%r9,>tmp=%r9
imulq $12, % r9, % r9
# qhasm: input_2 += tmp
# asm 1: add <tmp=int64#6,<input_2=int64#3
# asm 2: add <tmp=%r9,<input_2=%rdx
add % r9, % rdx
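# Pointer walk: input_3 is the stride between b coefficients, so adding
# 12*input_3 points input_2 at the highest-degree coefficient b12; each
# round below then steps input_2 back by one stride, visiting b11, b10,
# ..., b0 in turn.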
# qhasm: b12 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0
vbroadcasti128 0( % rdx), % ymm0
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: a6 = a6 ^ a6
# asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2
# asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1
vpxor % ymm1, % ymm1, % ymm1
# qhasm: a6[0] = mem128[ input_1 + 96 ]
# asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2
# asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1
vinsertf128 $0x0, 96( % rsi), % ymm1, % ymm1
# qhasm: r18 = b12 & a6
# asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3
# asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2
vpand % ymm0, % ymm1, % ymm2
# qhasm: mem256[ ptr + 576 ] = r18
# asm 1: vmovupd <r18=reg256#3,576(<ptr=int64#5)
# asm 2: vmovupd <r18=%ymm2,576(<ptr=%r8)
vmovupd % ymm2, 576( % r8)
# qhasm: a5[0] = mem128[ input_1 + 80 ]
# asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3
# asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2
vinsertf128 $0x0, 80( % rsi), % ymm2, % ymm2
# qhasm: a5[1] = mem128[ input_1 + 192 ]
# asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3
# asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2
vinsertf128 $0x1, 192( % rsi), % ymm2, % ymm2
# qhasm: r17 = b12 & a5
# asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4
# asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3
vpand % ymm0, % ymm2, % ymm3
# qhasm: a4[0] = mem128[ input_1 + 64 ]
# asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5
# asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4
vinsertf128 $0x0, 64( % rsi), % ymm4, % ymm4
# qhasm: a4[1] = mem128[ input_1 + 176 ]
# asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5
# asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4
vinsertf128 $0x1, 176( % rsi), % ymm4, % ymm4
# qhasm: r16 = b12 & a4
# asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6
# asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5
vpand % ymm0, % ymm4, % ymm5
# qhasm: a3[0] = mem128[ input_1 + 48 ]
# asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7
# asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6
vinsertf128 $0x0, 48( % rsi), % ymm6, % ymm6
# qhasm: a3[1] = mem128[ input_1 + 160 ]
# asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7
# asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6
vinsertf128 $0x1, 160( % rsi), % ymm6, % ymm6
# qhasm: r15 = b12 & a3
# asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8
# asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7
vpand % ymm0, % ymm6, % ymm7
# qhasm: a2[0] = mem128[ input_1 + 32 ]
# asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9
# asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8
vinsertf128 $0x0, 32( % rsi), % ymm8, % ymm8
# qhasm: a2[1] = mem128[ input_1 + 144 ]
# asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9
# asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8
vinsertf128 $0x1, 144( % rsi), % ymm8, % ymm8
# qhasm: r14 = b12 & a2
# asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10
# asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9
vpand % ymm0, % ymm8, % ymm9
# qhasm: a1[0] = mem128[ input_1 + 16 ]
# asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11
# asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10
vinsertf128 $0x0, 16( % rsi), % ymm10, % ymm10
# qhasm: a1[1] = mem128[ input_1 + 128 ]
# asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11
# asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10
vinsertf128 $0x1, 128( % rsi), % ymm10, % ymm10
# qhasm: r13 = b12 & a1
# asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12
# asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11
vpand % ymm0, % ymm10, % ymm11
# qhasm: a0[0] = mem128[ input_1 + 0 ]
# asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13
# asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12
vinsertf128 $0x0, 0( % rsi), % ymm12, % ymm12
# qhasm: a0[1] = mem128[ input_1 + 112 ]
# asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13
# asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12
vinsertf128 $0x1, 112( % rsi), % ymm12, % ymm12
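# Lane packing (as the loads above suggest): lane 0 of each a-register
# holds coefficients a[0..6] and lane 1 holds a[7..12], with the high
# lane of a6 left zero since there is no a[13]. Every 256-bit AND/XOR
# below therefore advances both halves of the product at once.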
# qhasm: r12 = b12 & a0
# asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1
# asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0
vpand % ymm0, % ymm12, % ymm0
# qhasm: b11 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b11 & a6
# asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4
# asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3
vpxor % ymm14, % ymm3, % ymm3
# qhasm: mem256[ ptr + 544 ] = r17
# asm 1: vmovupd <r17=reg256#4,544(<ptr=int64#5)
# asm 2: vmovupd <r17=%ymm3,544(<ptr=%r8)
vmovupd % ymm3, 544( % r8)
# qhasm: r = b11 & a5
# asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3
vpand % ymm13, % ymm2, % ymm3
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6
# asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5
vpxor % ymm3, % ymm5, % ymm5
# qhasm: r = b11 & a4
# asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3
vpand % ymm13, % ymm4, % ymm3
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8
# asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7
vpxor % ymm3, % ymm7, % ymm7
# qhasm: r = b11 & a3
# asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3
vpand % ymm13, % ymm6, % ymm3
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9
vpxor % ymm3, % ymm9, % ymm9
# qhasm: r = b11 & a2
# asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3
vpand % ymm13, % ymm8, % ymm3
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11
vpxor % ymm3, % ymm11, % ymm11
# qhasm: r = b11 & a1
# asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3
vpand % ymm13, % ymm10, % ymm3
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0
vpxor % ymm3, % ymm0, % ymm0
# qhasm: r11 = b11 & a0
# asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4
# asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3
vpand % ymm13, % ymm12, % ymm3
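# The rounds above and below follow schoolbook carry-less multiplication
# over GF(2): for each coefficient b[j], accumulate r[i+j] ^= a[i] & b[j].
# A C sketch of the whole accumulation (illustrative only):
#
#   for (int j = 12; j >= 0; j--)
#       for (int i = 0; i < 7; i++)
#           r[i + j] ^= a[i] & b[j];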
# qhasm: b10 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b10 & a6
# asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6
# asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5
vpxor % ymm14, % ymm5, % ymm5
# qhasm: mem256[ ptr + 512 ] = r16
# asm 1: vmovupd <r16=reg256#6,512(<ptr=int64#5)
# asm 2: vmovupd <r16=%ymm5,512(<ptr=%r8)
vmovupd % ymm5, 512( % r8)
# qhasm: r = b10 & a5
# asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5
vpand % ymm13, % ymm2, % ymm5
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8
# asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7
vpxor % ymm5, % ymm7, % ymm7
# qhasm: r = b10 & a4
# asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5
vpand % ymm13, % ymm4, % ymm5
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9
vpxor % ymm5, % ymm9, % ymm9
# qhasm: r = b10 & a3
# asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5
vpand % ymm13, % ymm6, % ymm5
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11
vpxor % ymm5, % ymm11, % ymm11
# qhasm: r = b10 & a2
# asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5
vpand % ymm13, % ymm8, % ymm5
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0
vpxor % ymm5, % ymm0, % ymm0
# qhasm: r = b10 & a1
# asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5
vpand % ymm13, % ymm10, % ymm5
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3
vpxor % ymm5, % ymm3, % ymm3
# qhasm: r10 = b10 & a0
# asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6
# asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5
vpand % ymm13, % ymm12, % ymm5
# qhasm: b9 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b9 & a6
# asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8
# asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7
vpxor % ymm14, % ymm7, % ymm7
# qhasm: mem256[ ptr + 480 ] = r15
# asm 1: vmovupd <r15=reg256#8,480(<ptr=int64#5)
# asm 2: vmovupd <r15=%ymm7,480(<ptr=%r8)
vmovupd % ymm7, 480( % r8)
# qhasm: r = b9 & a5
# asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7
vpand % ymm13, % ymm2, % ymm7
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9
vpxor % ymm7, % ymm9, % ymm9
# qhasm: r = b9 & a4
# asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7
vpand % ymm13, % ymm4, % ymm7
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11
vpxor % ymm7, % ymm11, % ymm11
# qhasm: r = b9 & a3
# asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7
vpand % ymm13, % ymm6, % ymm7
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0
vpxor % ymm7, % ymm0, % ymm0
# qhasm: r = b9 & a2
# asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7
vpand % ymm13, % ymm8, % ymm7
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3
vpxor % ymm7, % ymm3, % ymm3
# qhasm: r = b9 & a1
# asm 1: vpand <b9=reg256#14,<a1=reg256#11,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7
vpand % ymm13, % ymm10, % ymm7
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5
vpxor % ymm7, % ymm5, % ymm5
# qhasm: r9 = b9 & a0
# asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8
# asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7
vpand % ymm13, % ymm12, % ymm7
# qhasm: b8 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b8 & a6
# asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9
vpxor % ymm14, % ymm9, % ymm9
# qhasm: mem256[ ptr + 448 ] = r14
# asm 1: vmovupd <r14=reg256#10,448(<ptr=int64#5)
# asm 2: vmovupd <r14=%ymm9,448(<ptr=%r8)
vmovupd % ymm9, 448( % r8)
# qhasm: r = b8 & a5
# asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9
vpand % ymm13, % ymm2, % ymm9
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11
vpxor % ymm9, % ymm11, % ymm11
# qhasm: r = b8 & a4
# asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9
vpand % ymm13, % ymm4, % ymm9
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0
vpxor % ymm9, % ymm0, % ymm0
# qhasm: r = b8 & a3
# asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9
vpand % ymm13, % ymm6, % ymm9
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3
vpxor % ymm9, % ymm3, % ymm3
# qhasm: r = b8 & a2
# asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9
vpand % ymm13, % ymm8, % ymm9
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5
vpxor % ymm9, % ymm5, % ymm5
# qhasm: r = b8 & a1
# asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9
vpand % ymm13, % ymm10, % ymm9
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7
vpxor % ymm9, % ymm7, % ymm7
# qhasm: r8 = b8 & a0
# asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10
# asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9
vpand % ymm13, % ymm12, % ymm9
# qhasm: b7 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b7 & a6
# asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11
vpxor % ymm14, % ymm11, % ymm11
# qhasm: mem256[ ptr + 416 ] = r13
# asm 1: vmovupd <r13=reg256#12,416(<ptr=int64#5)
# asm 2: vmovupd <r13=%ymm11,416(<ptr=%r8)
vmovupd % ymm11, 416( % r8)
# qhasm: r = b7 & a5
# asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11
vpand % ymm13, % ymm2, % ymm11
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0
vpxor % ymm11, % ymm0, % ymm0
# qhasm: r = b7 & a4
# asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11
vpand % ymm13, % ymm4, % ymm11
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3
vpxor % ymm11, % ymm3, % ymm3
# qhasm: r = b7 & a3
# asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11
vpand % ymm13, % ymm6, % ymm11
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5
vpxor % ymm11, % ymm5, % ymm5
# qhasm: r = b7 & a2
# asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11
vpand % ymm13, % ymm8, % ymm11
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7
vpxor % ymm11, % ymm7, % ymm7
# qhasm: r = b7 & a1
# asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11
vpand % ymm13, % ymm10, % ymm11
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9
vpxor % ymm11, % ymm9, % ymm9
# qhasm: r7 = b7 & a0
# asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12
# asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11
vpand % ymm13, % ymm12, % ymm11
# qhasm: b6 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b6 & a6
# asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0
vpxor % ymm14, % ymm0, % ymm0
# qhasm: mem256[ ptr + 384 ] = r12
# asm 1: vmovupd <r12=reg256#1,384(<ptr=int64#5)
# asm 2: vmovupd <r12=%ymm0,384(<ptr=%r8)
vmovupd % ymm0, 384( % r8)
# qhasm: r = b6 & a5
# asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0
vpand % ymm13, % ymm2, % ymm0
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3
vpxor % ymm0, % ymm3, % ymm3
# qhasm: r = b6 & a4
# asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0
vpand % ymm13, % ymm4, % ymm0
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5
vpxor % ymm0, % ymm5, % ymm5
# qhasm: r = b6 & a3
# asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0
vpand % ymm13, % ymm6, % ymm0
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7
vpxor % ymm0, % ymm7, % ymm7
# qhasm: r = b6 & a2
# asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a2=%ymm8,>r=%ymm0
vpand % ymm13, % ymm8, % ymm0
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9
vpxor % ymm0, % ymm9, % ymm9
# qhasm: r = b6 & a1
# asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0
vpand % ymm13, % ymm10, % ymm0
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11
vpxor % ymm0, % ymm11, % ymm11
# qhasm: r6 = b6 & a0
# asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1
# asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0
vpand % ymm13, % ymm12, % ymm0
# qhasm: b5 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b5 & a6
# asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3
vpxor % ymm14, % ymm3, % ymm3
# qhasm: mem256[ ptr + 352 ] = r11
# asm 1: vmovupd <r11=reg256#4,352(<ptr=int64#5)
# asm 2: vmovupd <r11=%ymm3,352(<ptr=%r8)
vmovupd % ymm3, 352( % r8)
# qhasm: r = b5 & a5
# asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3
vpand % ymm13, % ymm2, % ymm3
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5
vpxor % ymm3, % ymm5, % ymm5
# qhasm: r = b5 & a4
# asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3
vpand % ymm13, % ymm4, % ymm3
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7
vpxor % ymm3, % ymm7, % ymm7
# qhasm: r = b5 & a3
# asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3
vpand % ymm13, % ymm6, % ymm3
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9
vpxor % ymm3, % ymm9, % ymm9
# qhasm: r = b5 & a2
# asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3
vpand % ymm13, % ymm8, % ymm3
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11
vpxor % ymm3, % ymm11, % ymm11
# qhasm: r = b5 & a1
# asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3
vpand % ymm13, % ymm10, % ymm3
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0
vpxor % ymm3, % ymm0, % ymm0
# qhasm: r5 = b5 & a0
# asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4
# asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3
vpand % ymm13, % ymm12, % ymm3
# qhasm: b4 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b4 & a6
# asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5
vpxor % ymm14, % ymm5, % ymm5
# qhasm: mem256[ ptr + 320 ] = r10
# asm 1: vmovupd <r10=reg256#6,320(<ptr=int64#5)
# asm 2: vmovupd <r10=%ymm5,320(<ptr=%r8)
vmovupd % ymm5, 320( % r8)
# qhasm: r = b4 & a5
# asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5
vpand % ymm13, % ymm2, % ymm5
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7
vpxor % ymm5, % ymm7, % ymm7
# qhasm: r = b4 & a4
# asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5
vpand % ymm13, % ymm4, % ymm5
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9
vpxor % ymm5, % ymm9, % ymm9
# qhasm: r = b4 & a3
# asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5
vpand % ymm13, % ymm6, % ymm5
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11
vpxor % ymm5, % ymm11, % ymm11
# qhasm: r = b4 & a2
# asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5
vpand % ymm13, % ymm8, % ymm5
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0
vpxor % ymm5, % ymm0, % ymm0
# qhasm: r = b4 & a1
# asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5
vpand % ymm13, % ymm10, % ymm5
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3
vpxor % ymm5, % ymm3, % ymm3
# qhasm: r4 = b4 & a0
# asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6
# asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5
vpand % ymm13, % ymm12, % ymm5
# qhasm: b3 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b3 & a6
# asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7
vpxor % ymm14, % ymm7, % ymm7
# qhasm: mem256[ ptr + 288 ] = r9
# asm 1: vmovupd <r9=reg256#8,288(<ptr=int64#5)
# asm 2: vmovupd <r9=%ymm7,288(<ptr=%r8)
vmovupd % ymm7, 288( % r8)
# qhasm: r = b3 & a5
# asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7
vpand % ymm13, % ymm2, % ymm7
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9
vpxor % ymm7, % ymm9, % ymm9
# qhasm: r = b3 & a4
# asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7
vpand % ymm13, % ymm4, % ymm7
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11
vpxor % ymm7, % ymm11, % ymm11
# qhasm: r = b3 & a3
# asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7
vpand % ymm13, % ymm6, % ymm7
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0
vpxor % ymm7, % ymm0, % ymm0
# qhasm: r = b3 & a2
# asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7
vpand % ymm13, % ymm8, % ymm7
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3
vpxor % ymm7, % ymm3, % ymm3
# qhasm: r = b3 & a1
# asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7
vpand % ymm13, % ymm10, % ymm7
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5
vpxor % ymm7, % ymm5, % ymm5
# qhasm: r3 = b3 & a0
# asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8
# asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7
vpand % ymm13, % ymm12, % ymm7
# qhasm: b2 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b2 & a6
# asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9
vpxor % ymm14, % ymm9, % ymm9
# qhasm: mem256[ ptr + 256 ] = r8
# asm 1: vmovupd <r8=reg256#10,256(<ptr=int64#5)
# asm 2: vmovupd <r8=%ymm9,256(<ptr=%r8)
vmovupd % ymm9, 256( % r8)
# qhasm: r = b2 & a5
# asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9
vpand % ymm13, % ymm2, % ymm9
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11
vpxor % ymm9, % ymm11, % ymm11
# qhasm: r = b2 & a4
# asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9
vpand % ymm13, % ymm4, % ymm9
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0
vpxor % ymm9, % ymm0, % ymm0
# qhasm: r = b2 & a3
# asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9
vpand % ymm13, % ymm6, % ymm9
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3
vpxor % ymm9, % ymm3, % ymm3
# qhasm: r = b2 & a2
# asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9
vpand % ymm13, % ymm8, % ymm9
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5
vpxor % ymm9, % ymm5, % ymm5
# qhasm: r = b2 & a1
# asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9
vpand % ymm13, % ymm10, % ymm9
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8
# asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7
vpxor % ymm9, % ymm7, % ymm7
# qhasm: r2 = b2 & a0
# asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10
# asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9
vpand % ymm13, % ymm12, % ymm9
# qhasm: b1 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13
vbroadcasti128 0( % rdx), % ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub % rcx, % rdx
# qhasm: r = b1 & a6
# asm 1: vpand <b1=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14
vpand % ymm13, % ymm1, % ymm14
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11
vpxor % ymm14, % ymm11, % ymm11
# qhasm: mem256[ ptr + 224 ] = r7
# asm 1: vmovupd <r7=reg256#12,224(<ptr=int64#5)
# asm 2: vmovupd <r7=%ymm11,224(<ptr=%r8)
vmovupd % ymm11, 224( % r8)
# qhasm: r = b1 & a5
# asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11
vpand % ymm13, % ymm2, % ymm11
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0
vpxor % ymm11, % ymm0, % ymm0
# qhasm: r = b1 & a4
# asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11
vpand % ymm13, % ymm4, % ymm11
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3
vpxor % ymm11, % ymm3, % ymm3
# qhasm: r = b1 & a3
# asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11
vpand % ymm13, % ymm6, % ymm11
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5
vpxor % ymm11, % ymm5, % ymm5
# qhasm: r = b1 & a2
# asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11
vpand %ymm13,%ymm8,%ymm11
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8
# asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7
vpxor %ymm11,%ymm7,%ymm7
# qhasm: r = b1 & a1
# asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11
vpand %ymm13,%ymm10,%ymm11
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10
# asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9
vpxor %ymm11,%ymm9,%ymm9
# qhasm: r1 = b1 & a0
# asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12
# asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11
vpand %ymm13,%ymm12,%ymm11
# qhasm: b0 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13
vbroadcasti128 0(%rdx),%ymm13
# qhasm: input_2 -= input_3
# asm 1: sub <input_3=int64#4,<input_2=int64#3
# asm 2: sub <input_3=%rcx,<input_2=%rdx
sub %rcx,%rdx
# qhasm: r = b0 & a6
# asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2
# asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1
vpand %ymm13,%ymm1,%ymm1
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ ptr + 192 ] = r6
# asm 1: vmovupd <r6=reg256#1,192(<ptr=int64#5)
# asm 2: vmovupd <r6=%ymm0,192(<ptr=%r8)
vmovupd %ymm0,192(%r8)
# qhasm: r = b0 & a5
# asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0
vpand %ymm13,%ymm2,%ymm0
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3
vpxor %ymm0,%ymm3,%ymm3
# qhasm: r = b0 & a4
# asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0
vpand %ymm13,%ymm4,%ymm0
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5
vpxor %ymm0,%ymm5,%ymm5
# qhasm: r = b0 & a3
# asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0
vpand %ymm13,%ymm6,%ymm0
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8
# asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7
vpxor %ymm0,%ymm7,%ymm7
# qhasm: r = b0 & a2
# asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0
vpand %ymm13,%ymm8,%ymm0
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10
# asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9
vpxor %ymm0,%ymm9,%ymm9
# qhasm: r = b0 & a1
# asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0
vpand %ymm13,%ymm10,%ymm0
# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12
# asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11
vpxor %ymm0,%ymm11,%ymm11
# qhasm: r0 = b0 & a0
# asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1
# asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0
vpand %ymm13,%ymm12,%ymm0
# qhasm: mem256[ ptr + 160 ] = r5
# asm 1: vmovupd <r5=reg256#4,160(<ptr=int64#5)
# asm 2: vmovupd <r5=%ymm3,160(<ptr=%r8)
vmovupd %ymm3,160(%r8)
# qhasm: mem256[ ptr + 128 ] = r4
# asm 1: vmovupd <r4=reg256#6,128(<ptr=int64#5)
# asm 2: vmovupd <r4=%ymm5,128(<ptr=%r8)
vmovupd %ymm5,128(%r8)
# qhasm: mem256[ ptr + 96 ] = r3
# asm 1: vmovupd <r3=reg256#8,96(<ptr=int64#5)
# asm 2: vmovupd <r3=%ymm7,96(<ptr=%r8)
vmovupd %ymm7,96(%r8)
# qhasm: mem256[ ptr + 64 ] = r2
# asm 1: vmovupd <r2=reg256#10,64(<ptr=int64#5)
# asm 2: vmovupd <r2=%ymm9,64(<ptr=%r8)
vmovupd %ymm9,64(%r8)
# qhasm: mem256[ ptr + 32 ] = r1
# asm 1: vmovupd <r1=reg256#12,32(<ptr=int64#5)
# asm 2: vmovupd <r1=%ymm11,32(<ptr=%r8)
vmovupd %ymm11,32(%r8)
# qhasm: mem256[ ptr + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<ptr=int64#5)
# asm 2: vmovupd <r0=%ymm0,0(<ptr=%r8)
vmovupd %ymm0,0(%r8)
# qhasm: vzeroupper
vzeroupper
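# note: end of the AVX2 (ymm) portion; vzeroupper avoids AVX-to-SSE
# transition penalties before the xmm-only code below, which appears to
# fold the high product limbs h13..h24 back into h0..h12, consistent
# with reduction modulo the GF(2^13) polynomial x^13 + x^4 + x^3 + x + 1.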
# qhasm: h24 = mem128[ ptr + 560 ]
# asm 1: movdqu 560(<ptr=int64#5),>h24=reg128#1
# asm 2: movdqu 560(<ptr=%r8),>h24=%xmm0
movdqu 560(%r8),%xmm0
# qhasm: h11 = h24
# asm 1: movdqa <h24=reg128#1,>h11=reg128#2
# asm 2: movdqa <h24=%xmm0,>h11=%xmm1
movdqa %xmm0,%xmm1
# qhasm: h12 = h24
# asm 1: movdqa <h24=reg128#1,>h12=reg128#3
# asm 2: movdqa <h24=%xmm0,>h12=%xmm2
movdqa %xmm0,%xmm2
# qhasm: h14 = h24
# asm 1: movdqa <h24=reg128#1,>h14=reg128#4
# asm 2: movdqa <h24=%xmm0,>h14=%xmm3
movdqa %xmm0,%xmm3
# qhasm: h15 = h24
# asm 1: movdqa <h24=reg128#1,>h15=reg128#1
# asm 2: movdqa <h24=%xmm0,>h15=%xmm0
movdqa %xmm0,%xmm0
# qhasm: h23 = mem128[ ptr + 528 ]
# asm 1: movdqu 528(<ptr=int64#5),>h23=reg128#5
# asm 2: movdqu 528(<ptr=%r8),>h23=%xmm4
movdqu 528(%r8),%xmm4
# qhasm: h10 = h23
# asm 1: movdqa <h23=reg128#5,>h10=reg128#6
# asm 2: movdqa <h23=%xmm4,>h10=%xmm5
movdqa %xmm4,%xmm5
# qhasm: h11 = h11 ^ h23
# asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1
vpxor %xmm4,%xmm1,%xmm1
# qhasm: h13 = h23
# asm 1: movdqa <h23=reg128#5,>h13=reg128#7
# asm 2: movdqa <h23=%xmm4,>h13=%xmm6
movdqa %xmm4,%xmm6
# qhasm: h14 = h14 ^ h23
# asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4
# asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3
vpxor %xmm4,%xmm3,%xmm3
# qhasm: h22 = mem128[ ptr + 496 ]
# asm 1: movdqu 496(<ptr=int64#5),>h22=reg128#5
# asm 2: movdqu 496(<ptr=%r8),>h22=%xmm4
movdqu 496(%r8),%xmm4
# qhasm: h9 = h22
# asm 1: movdqa <h22=reg128#5,>h9=reg128#8
# asm 2: movdqa <h22=%xmm4,>h9=%xmm7
movdqa %xmm4,%xmm7
# qhasm: h10 = h10 ^ h22
# asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6
# asm 2: vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5
vpxor %xmm4,%xmm5,%xmm5
# qhasm: h12 = h12 ^ h22
# asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2
vpxor %xmm4,%xmm2,%xmm2
# qhasm: h13 = h13 ^ h22
# asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5
# asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4
vpxor %xmm4,%xmm6,%xmm4
# qhasm: h21 = mem128[ ptr + 464 ]
# asm 1: movdqu 464(<ptr=int64#5),>h21=reg128#7
# asm 2: movdqu 464(<ptr=%r8),>h21=%xmm6
movdqu 464(%r8),%xmm6
# qhasm: h8 = h21
# asm 1: movdqa <h21=reg128#7,>h8=reg128#9
# asm 2: movdqa <h21=%xmm6,>h8=%xmm8
movdqa %xmm6,%xmm8
# qhasm: h9 = h9 ^ h21
# asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8
# asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7
vpxor %xmm6,%xmm7,%xmm7
# qhasm: h11 = h11 ^ h21
# asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1
vpxor %xmm6,%xmm1,%xmm1
# qhasm: h12 = h12 ^ h21
# asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2
vpxor %xmm6,%xmm2,%xmm2
# qhasm: h20 = mem128[ ptr + 432 ]
# asm 1: movdqu 432(<ptr=int64#5),>h20=reg128#7
# asm 2: movdqu 432(<ptr=%r8),>h20=%xmm6
movdqu 432(%r8),%xmm6
# qhasm: h7 = h20
# asm 1: movdqa <h20=reg128#7,>h7=reg128#10
# asm 2: movdqa <h20=%xmm6,>h7=%xmm9
movdqa %xmm6,%xmm9
# qhasm: h8 = h8 ^ h20
# asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9
# asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8
vpxor %xmm6,%xmm8,%xmm8
# qhasm: h10 = h10 ^ h20
# asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6
# asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5
vpxor %xmm6,%xmm5,%xmm5
# qhasm: h11 = h11 ^ h20
# asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1
vpxor %xmm6,%xmm1,%xmm1
# qhasm: h19 = mem128[ ptr + 400 ]
# asm 1: movdqu 400(<ptr=int64#5),>h19=reg128#7
# asm 2: movdqu 400(<ptr=%r8),>h19=%xmm6
movdqu 400(%r8),%xmm6
# qhasm: h6 = h19
# asm 1: movdqa <h19=reg128#7,>h6=reg128#11
# asm 2: movdqa <h19=%xmm6,>h6=%xmm10
movdqa %xmm6,%xmm10
# qhasm: h7 = h7 ^ h19
# asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10
# asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9
vpxor %xmm6,%xmm9,%xmm9
# qhasm: h9 = h9 ^ h19
# asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8
# asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7
vpxor %xmm6,%xmm7,%xmm7
# qhasm: h10 = h10 ^ h19
# asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6
# asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5
vpxor %xmm6,%xmm5,%xmm5
# qhasm: h18 = mem128[ ptr + 368 ]
# asm 1: movdqu 368(<ptr=int64#5),>h18=reg128#7
# asm 2: movdqu 368(<ptr=%r8),>h18=%xmm6
movdqu 368(%r8),%xmm6
# qhasm: h18 = h18 ^ mem128[ ptr + 576 ]
# asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7
# asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6
vpxor 576(%r8),%xmm6,%xmm6
# qhasm: h5 = h18
# asm 1: movdqa <h18=reg128#7,>h5=reg128#12
# asm 2: movdqa <h18=%xmm6,>h5=%xmm11
movdqa %xmm6,%xmm11
# qhasm: h6 = h6 ^ h18
# asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11
# asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10
vpxor %xmm6,%xmm10,%xmm10
# qhasm: h8 = h8 ^ h18
# asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9
# asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8
vpxor %xmm6,%xmm8,%xmm8
# qhasm: h9 = h9 ^ h18
# asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7
# asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6
vpxor %xmm6,%xmm7,%xmm6
# qhasm: h17 = mem128[ ptr + 336 ]
# asm 1: movdqu 336(<ptr=int64#5),>h17=reg128#8
# asm 2: movdqu 336(<ptr=%r8),>h17=%xmm7
movdqu 336(%r8),%xmm7
# qhasm: h17 = h17 ^ mem128[ ptr + 544 ]
# asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8
# asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7
vpxor 544(%r8),%xmm7,%xmm7
# qhasm: h4 = h17
# asm 1: movdqa <h17=reg128#8,>h4=reg128#13
# asm 2: movdqa <h17=%xmm7,>h4=%xmm12
movdqa %xmm7,%xmm12
# qhasm: h5 = h5 ^ h17
# asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12
# asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11
vpxor %xmm7,%xmm11,%xmm11
# qhasm: h7 = h7 ^ h17
# asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10
# asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9
vpxor %xmm7,%xmm9,%xmm9
# qhasm: h8 = h8 ^ h17
# asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8
# asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7
vpxor %xmm7,%xmm8,%xmm7
# qhasm: h16 = mem128[ ptr + 304 ]
# asm 1: movdqu 304(<ptr=int64#5),>h16=reg128#9
# asm 2: movdqu 304(<ptr=%r8),>h16=%xmm8
movdqu 304(%r8),%xmm8
# qhasm: h16 = h16 ^ mem128[ ptr + 512 ]
# asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9
# asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8
vpxor 512(%r8),%xmm8,%xmm8
# qhasm: h3 = h16
# asm 1: movdqa <h16=reg128#9,>h3=reg128#14
# asm 2: movdqa <h16=%xmm8,>h3=%xmm13
movdqa %xmm8,%xmm13
# qhasm: h4 = h4 ^ h16
# asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13
# asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12
vpxor %xmm8,%xmm12,%xmm12
# qhasm: h6 = h6 ^ h16
# asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11
# asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10
vpxor %xmm8,%xmm10,%xmm10
# qhasm: h7 = h7 ^ h16
# asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9
# asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8
vpxor %xmm8,%xmm9,%xmm8
# qhasm: h15 = h15 ^ mem128[ ptr + 272 ]
# asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1
# asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0
vpxor 272(%r8),%xmm0,%xmm0
# qhasm: h15 = h15 ^ mem128[ ptr + 480 ]
# asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1
# asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0
vpxor 480(%r8),%xmm0,%xmm0
# qhasm: h2 = h15
# asm 1: movdqa <h15=reg128#1,>h2=reg128#10
# asm 2: movdqa <h15=%xmm0,>h2=%xmm9
movdqa %xmm0,%xmm9
# qhasm: h3 = h3 ^ h15
# asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14
# asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13
vpxor %xmm0,%xmm13,%xmm13
# qhasm: h5 = h5 ^ h15
# asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12
# asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11
vpxor %xmm0,%xmm11,%xmm11
# qhasm: h6 = h6 ^ h15
# asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1
# asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0
vpxor %xmm0,%xmm10,%xmm0
# qhasm: h14 = h14 ^ mem128[ ptr + 240 ]
# asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4
# asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3
vpxor 240(%r8),%xmm3,%xmm3
# qhasm: h14 = h14 ^ mem128[ ptr + 448 ]
# asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4
# asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3
vpxor 448(%r8),%xmm3,%xmm3
# qhasm: h1 = h14
# asm 1: movdqa <h14=reg128#4,>h1=reg128#11
# asm 2: movdqa <h14=%xmm3,>h1=%xmm10
movdqa %xmm3,%xmm10
# qhasm: h2 = h2 ^ h14
# asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10
# asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9
vpxor %xmm3,%xmm9,%xmm9
# qhasm: h4 = h4 ^ h14
# asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13
# asm 2: vpxor <h14=%xmm3,<h4=%xmm12,>h4=%xmm12
vpxor %xmm3,%xmm12,%xmm12
# qhasm: h5 = h5 ^ h14
# asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4
# asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3
vpxor %xmm3,%xmm11,%xmm3
# qhasm: h13 = h13 ^ mem128[ ptr + 208 ]
# asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5
# asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4
vpxor 208(%r8),%xmm4,%xmm4
# qhasm: h13 = h13 ^ mem128[ ptr + 416 ]
# asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5
# asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4
vpxor 416(%r8),%xmm4,%xmm4
# qhasm: h0 = h13
# asm 1: movdqa <h13=reg128#5,>h0=reg128#12
# asm 2: movdqa <h13=%xmm4,>h0=%xmm11
movdqa %xmm4,%xmm11
# qhasm: h1 = h1 ^ h13
# asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11
# asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10
vpxor %xmm4,%xmm10,%xmm10
# qhasm: h3 = h3 ^ h13
# asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14
# asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13
vpxor %xmm4,%xmm13,%xmm13
# qhasm: h4 = h4 ^ h13
# asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5
# asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4
vpxor %xmm4,%xmm12,%xmm4
# qhasm: h12 = h12 ^ mem128[ ptr + 384 ]
# asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2
vpxor 384(%r8),%xmm2,%xmm2
# qhasm: h12 = h12 ^ mem128[ ptr + 176 ]
# asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2
vpxor 176(%r8),%xmm2,%xmm2
# qhasm: mem128[ input_0 + 192 ] = h12
# asm 1: movdqu <h12=reg128#3,192(<input_0=int64#1)
# asm 2: movdqu <h12=%xmm2,192(<input_0=%rdi)
movdqu %xmm2,192(%rdi)
# qhasm: h11 = h11 ^ mem128[ ptr + 352 ]
# asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1
vpxor 352(%r8),%xmm1,%xmm1
# qhasm: h11 = h11 ^ mem128[ ptr + 144 ]
# asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1
vpxor 144(%r8),%xmm1,%xmm1
# qhasm: mem128[ input_0 + 176 ] = h11
# asm 1: movdqu <h11=reg128#2,176(<input_0=int64#1)
# asm 2: movdqu <h11=%xmm1,176(<input_0=%rdi)
movdqu %xmm1,176(%rdi)
# qhasm: h10 = h10 ^ mem128[ ptr + 320 ]
# asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2
# asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1
vpxor 320(%r8),%xmm5,%xmm1
# qhasm: h10 = h10 ^ mem128[ ptr + 112 ]
# asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2
# asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1
vpxor 112(%r8),%xmm1,%xmm1
# qhasm: mem128[ input_0 + 160 ] = h10
# asm 1: movdqu <h10=reg128#2,160(<input_0=int64#1)
# asm 2: movdqu <h10=%xmm1,160(<input_0=%rdi)
movdqu %xmm1,160(%rdi)
# qhasm: h9 = h9 ^ mem128[ ptr + 288 ]
# asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2
# asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1
vpxor 288(%r8),%xmm6,%xmm1
# qhasm: h9 = h9 ^ mem128[ ptr + 80 ]
# asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2
# asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1
vpxor 80(%r8),%xmm1,%xmm1
# qhasm: mem128[ input_0 + 144 ] = h9
# asm 1: movdqu <h9=reg128#2,144(<input_0=int64#1)
# asm 2: movdqu <h9=%xmm1,144(<input_0=%rdi)
movdqu %xmm1,144(%rdi)
# qhasm: h8 = h8 ^ mem128[ ptr + 256 ]
# asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2
# asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1
vpxor 256(%r8),%xmm7,%xmm1
# qhasm: h8 = h8 ^ mem128[ ptr + 48 ]
# asm 1: vpxor 48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2
# asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1
vpxor 48(%r8),%xmm1,%xmm1
# qhasm: mem128[ input_0 + 128 ] = h8
# asm 1: movdqu <h8=reg128#2,128(<input_0=int64#1)
# asm 2: movdqu <h8=%xmm1,128(<input_0=%rdi)
movdqu %xmm1,128(%rdi)
# qhasm: h7 = h7 ^ mem128[ ptr + 224 ]
# asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2
# asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1
vpxor 224(%r8),%xmm8,%xmm1
# qhasm: h7 = h7 ^ mem128[ ptr + 16 ]
# asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2
# asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1
vpxor 16(%r8),%xmm1,%xmm1
# qhasm: mem128[ input_0 + 112 ] = h7
# asm 1: movdqu <h7=reg128#2,112(<input_0=int64#1)
# asm 2: movdqu <h7=%xmm1,112(<input_0=%rdi)
movdqu %xmm1,112(%rdi)
# qhasm: h6 = h6 ^ mem128[ ptr + 192 ]
# asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1
# asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0
vpxor 192(%r8),%xmm0,%xmm0
# qhasm: mem128[ input_0 + 96 ] = h6
# asm 1: movdqu <h6=reg128#1,96(<input_0=int64#1)
# asm 2: movdqu <h6=%xmm0,96(<input_0=%rdi)
movdqu %xmm0,96(%rdi)
# qhasm: h5 = h5 ^ mem128[ ptr + 160 ]
# asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1
# asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0
vpxor 160(%r8),%xmm3,%xmm0
# qhasm: mem128[ input_0 + 80 ] = h5
# asm 1: movdqu <h5=reg128#1,80(<input_0=int64#1)
# asm 2: movdqu <h5=%xmm0,80(<input_0=%rdi)
movdqu %xmm0,80(%rdi)
# qhasm: h4 = h4 ^ mem128[ ptr + 128 ]
# asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1
# asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0
vpxor 128(%r8),%xmm4,%xmm0
# qhasm: mem128[ input_0 + 64 ] = h4
# asm 1: movdqu <h4=reg128#1,64(<input_0=int64#1)
# asm 2: movdqu <h4=%xmm0,64(<input_0=%rdi)
movdqu %xmm0,64(%rdi)
# qhasm: h3 = h3 ^ mem128[ ptr + 96 ]
# asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1
# asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0
vpxor 96(%r8),%xmm13,%xmm0
# qhasm: mem128[ input_0 + 48 ] = h3
# asm 1: movdqu <h3=reg128#1,48(<input_0=int64#1)
# asm 2: movdqu <h3=%xmm0,48(<input_0=%rdi)
movdqu %xmm0,48(%rdi)
# qhasm: h2 = h2 ^ mem128[ ptr + 64 ]
# asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1
# asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0
vpxor 64(%r8),%xmm9,%xmm0
# qhasm: mem128[ input_0 + 32 ] = h2
# asm 1: movdqu <h2=reg128#1,32(<input_0=int64#1)
# asm 2: movdqu <h2=%xmm0,32(<input_0=%rdi)
movdqu %xmm0,32(%rdi)
# qhasm: h1 = h1 ^ mem128[ ptr + 32 ]
# asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1
# asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0
vpxor 32(%r8),%xmm10,%xmm0
# qhasm: mem128[ input_0 + 16 ] = h1
# asm 1: movdqu <h1=reg128#1,16(<input_0=int64#1)
# asm 2: movdqu <h1=%xmm0,16(<input_0=%rdi)
movdqu %xmm0,16(%rdi)
# qhasm: h0 = h0 ^ mem128[ ptr + 0 ]
# asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1
# asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0
vpxor 0(%r8),%xmm11,%xmm0
# qhasm: mem128[ input_0 + 0 ] = h0
# asm 1: movdqu <h0=reg128#1,0(<input_0=int64#1)
# asm 2: movdqu <h0=%xmm0,0(<input_0=%rdi)
movdqu %xmm0,0(%rdi)
# qhasm: return
add %r11,%rsp
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/vec_reduce_asm.S
#include "namespace.h"
#define vec_reduce_asm CRYPTO_NAMESPACE(vec_reduce_asm)
#define _vec_reduce_asm _CRYPTO_NAMESPACE(vec_reduce_asm)
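# note: this routine appears to reduce 13 bitsliced 128-bit limbs
# (offsets 0..192 in 16-byte steps) to their bit parities, packing the
# 13 parity bits into rax with limb 12 processed first (most significant).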
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 c
# qhasm: int64 r
# qhasm: enter vec_reduce_asm
.p2align 5
.global _vec_reduce_asm
.global vec_reduce_asm
_vec_reduce_asm:
vec_reduce_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
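# note: standard qhasm prologue: r11 = rsp mod 32 is subtracted so the
# stack becomes 32-byte aligned (no extra bytes reserved here) and is
# added back before ret.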
# qhasm: r = 0
# asm 1: mov $0,>r=int64#7
# asm 2: mov $0,>r=%rax
mov $0,%rax
# qhasm: t0 = mem64[ input_0 + 192 ]
# asm 1: movq 192(<input_0=int64#1),>t0=int64#2
# asm 2: movq 192(<input_0=%rdi),>t0=%rsi
movq 192(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 200 ]
# asm 1: movq 200(<input_0=int64#1),>t1=int64#3
# asm 2: movq 200(<input_0=%rdi),>t1=%rdx
movq 200(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
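# note: the same pattern repeats once per limb below: load two 64-bit
# halves, XOR them, reduce with popcnt mod 2 to a parity bit, and shift
# that bit into the accumulator r.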
# qhasm: t0 = mem64[ input_0 + 176 ]
# asm 1: movq 176(<input_0=int64#1),>t0=int64#2
# asm 2: movq 176(<input_0=%rdi),>t0=%rsi
movq 176(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 184 ]
# asm 1: movq 184(<input_0=int64#1),>t1=int64#3
# asm 2: movq 184(<input_0=%rdi),>t1=%rdx
movq 184(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 160 ]
# asm 1: movq 160(<input_0=int64#1),>t0=int64#2
# asm 2: movq 160(<input_0=%rdi),>t0=%rsi
movq 160(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 168 ]
# asm 1: movq 168(<input_0=int64#1),>t1=int64#3
# asm 2: movq 168(<input_0=%rdi),>t1=%rdx
movq 168(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 144 ]
# asm 1: movq 144(<input_0=int64#1),>t0=int64#2
# asm 2: movq 144(<input_0=%rdi),>t0=%rsi
movq 144(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 152 ]
# asm 1: movq 152(<input_0=int64#1),>t1=int64#3
# asm 2: movq 152(<input_0=%rdi),>t1=%rdx
movq 152(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 128 ]
# asm 1: movq 128(<input_0=int64#1),>t0=int64#2
# asm 2: movq 128(<input_0=%rdi),>t0=%rsi
movq 128(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 136 ]
# asm 1: movq 136(<input_0=int64#1),>t1=int64#3
# asm 2: movq 136(<input_0=%rdi),>t1=%rdx
movq 136(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 112 ]
# asm 1: movq 112(<input_0=int64#1),>t0=int64#2
# asm 2: movq 112(<input_0=%rdi),>t0=%rsi
movq 112(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 120 ]
# asm 1: movq 120(<input_0=int64#1),>t1=int64#3
# asm 2: movq 120(<input_0=%rdi),>t1=%rdx
movq 120(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 96 ]
# asm 1: movq 96(<input_0=int64#1),>t0=int64#2
# asm 2: movq 96(<input_0=%rdi),>t0=%rsi
movq 96(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 104 ]
# asm 1: movq 104(<input_0=int64#1),>t1=int64#3
# asm 2: movq 104(<input_0=%rdi),>t1=%rdx
movq 104(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 80 ]
# asm 1: movq 80(<input_0=int64#1),>t0=int64#2
# asm 2: movq 80(<input_0=%rdi),>t0=%rsi
movq 80(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 88 ]
# asm 1: movq 88(<input_0=int64#1),>t1=int64#3
# asm 2: movq 88(<input_0=%rdi),>t1=%rdx
movq 88(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 64 ]
# asm 1: movq 64(<input_0=int64#1),>t0=int64#2
# asm 2: movq 64(<input_0=%rdi),>t0=%rsi
movq 64(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 72 ]
# asm 1: movq 72(<input_0=int64#1),>t1=int64#3
# asm 2: movq 72(<input_0=%rdi),>t1=%rdx
movq 72(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 48 ]
# asm 1: movq 48(<input_0=int64#1),>t0=int64#2
# asm 2: movq 48(<input_0=%rdi),>t0=%rsi
movq 48(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 56 ]
# asm 1: movq 56(<input_0=int64#1),>t1=int64#3
# asm 2: movq 56(<input_0=%rdi),>t1=%rdx
movq 56(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 32 ]
# asm 1: movq 32(<input_0=int64#1),>t0=int64#2
# asm 2: movq 32(<input_0=%rdi),>t0=%rsi
movq 32(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 40 ]
# asm 1: movq 40(<input_0=int64#1),>t1=int64#3
# asm 2: movq 40(<input_0=%rdi),>t1=%rdx
movq 40(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 16 ]
# asm 1: movq 16(<input_0=int64#1),>t0=int64#2
# asm 2: movq 16(<input_0=%rdi),>t0=%rsi
movq 16(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 24 ]
# asm 1: movq 24(<input_0=int64#1),>t1=int64#3
# asm 2: movq 24(<input_0=%rdi),>t1=%rdx
movq 24(%rdi),%rdx
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#3,<t0=int64#2
# asm 2: xor <t1=%rdx,<t0=%rsi
xor %rdx,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#2
# asm 2: popcnt <t0=%rsi, >c=%rsi
popcnt %rsi,%rsi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#2d
# asm 2: and $1,<c=%esi
and $1,%esi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#2,<r=int64#7
# asm 2: or <c=%rsi,<r=%rax
or %rsi,%rax
# qhasm: t0 = mem64[ input_0 + 0 ]
# asm 1: movq 0(<input_0=int64#1),>t0=int64#2
# asm 2: movq 0(<input_0=%rdi),>t0=%rsi
movq 0(%rdi),%rsi
# qhasm: t1 = mem64[ input_0 + 8 ]
# asm 1: movq 8(<input_0=int64#1),>t1=int64#1
# asm 2: movq 8(<input_0=%rdi),>t1=%rdi
movq 8(%rdi),%rdi
# qhasm: t0 ^= t1
# asm 1: xor <t1=int64#1,<t0=int64#2
# asm 2: xor <t1=%rdi,<t0=%rsi
xor %rdi,%rsi
# qhasm: c = count(t0)
# asm 1: popcnt <t0=int64#2, >c=int64#1
# asm 2: popcnt <t0=%rsi, >c=%rdi
popcnt %rsi,%rdi
# qhasm: (uint32) c &= 1
# asm 1: and $1,<c=int64#1d
# asm 2: and $1,<c=%edi
and $1,%edi
# qhasm: r <<= 1
# asm 1: shl $1,<r=int64#7
# asm 2: shl $1,<r=%rax
shl $1,%rax
# qhasm: r |= c
# asm 1: or <c=int64#1,<r=int64#7
# asm 2: or <c=%rdi,<r=%rax
or %rdi,%rax
# qhasm: return r
add %r11,%rsp
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/syndrome_asm.S
#include "namespace.h"
#define syndrome_asm CRYPTO_NAMESPACE(syndrome_asm)
#define _syndrome_asm _CRYPTO_NAMESPACE(syndrome_asm)
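# note: this appears to compute a McEliece syndrome: 1664 matrix rows of
# 816 bytes are processed back to front (input_1 starts at the last row),
# each row is ANDed with the non-systematic part of the error vector and
# its parity becomes one syndrome bit; the systematic part is XORed in
# after the loop.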
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: int64 b64
# qhasm: int64 synd
# qhasm: int64 addr
# qhasm: int64 c
# qhasm: int64 c_all
# qhasm: int64 row
# qhasm: int64 p
# qhasm: int64 e
# qhasm: int64 s
# qhasm: reg256 pp
# qhasm: reg256 ee
# qhasm: reg256 ss
# qhasm: int64 buf_ptr
# qhasm: stack256 buf
# qhasm: enter syndrome_asm
.p2align 5
.global _syndrome_asm
.global syndrome_asm
_syndrome_asm:
syndrome_asm:
mov %rsp,%r11
and $31,%r11
add $32,%r11
sub %r11,%rsp
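# note: unlike the other prologues, this one reserves a 32-byte aligned
# 32-byte stack slot (buf), used below to spill the ymm accumulator so
# its four quadwords can be popcnt'd.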
# qhasm: input_1 += 1357008
# asm 1: add $1357008,<input_1=int64#2
# asm 2: add $1357008,<input_1=%rsi
add $1357008,%rsi
# qhasm: buf_ptr = &buf
# asm 1: leaq <buf=stack256#1,>buf_ptr=int64#4
# asm 2: leaq <buf=0(%rsp),>buf_ptr=%rcx
leaq 0(%rsp),%rcx
# qhasm: row = 1664
# asm 1: mov $1664,>row=int64#5
# asm 2: mov $1664,>row=%r8
mov $1664,%r8
# qhasm: loop:
._loop:
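# note: each iteration seems to cover one 816-byte row: 25 ymm loads plus
# two 64-bit loads, each ANDed with the matching error-vector chunk and
# XOR-accumulated into ss / s.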
# qhasm: row -= 1
# asm 1: sub $1,<row=int64#5
# asm 2: sub $1,<row=%r8
sub $1,%r8
# qhasm: ss = mem256[ input_1 + 0 ]
# asm 1: vmovupd 0(<input_1=int64#2),>ss=reg256#1
# asm 2: vmovupd 0(<input_1=%rsi),>ss=%ymm0
vmovupd 0(%rsi),%ymm0
# qhasm: ee = mem256[ input_2 + 208 ]
# asm 1: vmovupd 208(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 208(<input_2=%rdx),>ee=%ymm1
vmovupd 208(%rdx),%ymm1
# qhasm: ss &= ee
# asm 1: vpand <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpand <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpand %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 32 ]
# asm 1: vmovupd 32(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 32(<input_1=%rsi),>pp=%ymm1
vmovupd 32(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 240 ]
# asm 1: vmovupd 240(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 240(<input_2=%rdx),>ee=%ymm2
vmovupd 240(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 64 ]
# asm 1: vmovupd 64(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 64(<input_1=%rsi),>pp=%ymm1
vmovupd 64(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 272 ]
# asm 1: vmovupd 272(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 272(<input_2=%rdx),>ee=%ymm2
vmovupd 272(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 96 ]
# asm 1: vmovupd 96(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 96(<input_1=%rsi),>pp=%ymm1
vmovupd 96(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 304 ]
# asm 1: vmovupd 304(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 304(<input_2=%rdx),>ee=%ymm2
vmovupd 304(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 128 ]
# asm 1: vmovupd 128(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 128(<input_1=%rsi),>pp=%ymm1
vmovupd 128(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 336 ]
# asm 1: vmovupd 336(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 336(<input_2=%rdx),>ee=%ymm2
vmovupd 336(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 160 ]
# asm 1: vmovupd 160(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 160(<input_1=%rsi),>pp=%ymm1
vmovupd 160(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 368 ]
# asm 1: vmovupd 368(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 368(<input_2=%rdx),>ee=%ymm2
vmovupd 368(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 192 ]
# asm 1: vmovupd 192(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 192(<input_1=%rsi),>pp=%ymm1
vmovupd 192(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 400 ]
# asm 1: vmovupd 400(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 400(<input_2=%rdx),>ee=%ymm2
vmovupd 400(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 224 ]
# asm 1: vmovupd 224(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 224(<input_1=%rsi),>pp=%ymm1
vmovupd 224(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 432 ]
# asm 1: vmovupd 432(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 432(<input_2=%rdx),>ee=%ymm2
vmovupd 432(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 256 ]
# asm 1: vmovupd 256(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 256(<input_1=%rsi),>pp=%ymm1
vmovupd 256(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 464 ]
# asm 1: vmovupd 464(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 464(<input_2=%rdx),>ee=%ymm2
vmovupd 464(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 288 ]
# asm 1: vmovupd 288(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 288(<input_1=%rsi),>pp=%ymm1
vmovupd 288(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 496 ]
# asm 1: vmovupd 496(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 496(<input_2=%rdx),>ee=%ymm2
vmovupd 496(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 320 ]
# asm 1: vmovupd 320(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 320(<input_1=%rsi),>pp=%ymm1
vmovupd 320(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 528 ]
# asm 1: vmovupd 528(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 528(<input_2=%rdx),>ee=%ymm2
vmovupd 528(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 352 ]
# asm 1: vmovupd 352(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 352(<input_1=%rsi),>pp=%ymm1
vmovupd 352(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 560 ]
# asm 1: vmovupd 560(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 560(<input_2=%rdx),>ee=%ymm2
vmovupd 560(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 384 ]
# asm 1: vmovupd 384(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 384(<input_1=%rsi),>pp=%ymm1
vmovupd 384(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 592 ]
# asm 1: vmovupd 592(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 592(<input_2=%rdx),>ee=%ymm2
vmovupd 592(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 416 ]
# asm 1: vmovupd 416(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 416(<input_1=%rsi),>pp=%ymm1
vmovupd 416(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 624 ]
# asm 1: vmovupd 624(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 624(<input_2=%rdx),>ee=%ymm2
vmovupd 624(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 448 ]
# asm 1: vmovupd 448(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 448(<input_1=%rsi),>pp=%ymm1
vmovupd 448(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 656 ]
# asm 1: vmovupd 656(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 656(<input_2=%rdx),>ee=%ymm2
vmovupd 656(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 480 ]
# asm 1: vmovupd 480(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 480(<input_1=%rsi),>pp=%ymm1
vmovupd 480(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 688 ]
# asm 1: vmovupd 688(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 688(<input_2=%rdx),>ee=%ymm2
vmovupd 688(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 512 ]
# asm 1: vmovupd 512(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 512(<input_1=%rsi),>pp=%ymm1
vmovupd 512(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 720 ]
# asm 1: vmovupd 720(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 720(<input_2=%rdx),>ee=%ymm2
vmovupd 720(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 544 ]
# asm 1: vmovupd 544(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 544(<input_1=%rsi),>pp=%ymm1
vmovupd 544(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 752 ]
# asm 1: vmovupd 752(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 752(<input_2=%rdx),>ee=%ymm2
vmovupd 752(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 576 ]
# asm 1: vmovupd 576(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 576(<input_1=%rsi),>pp=%ymm1
vmovupd 576(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 784 ]
# asm 1: vmovupd 784(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 784(<input_2=%rdx),>ee=%ymm2
vmovupd 784(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 608 ]
# asm 1: vmovupd 608(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 608(<input_1=%rsi),>pp=%ymm1
vmovupd 608(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 816 ]
# asm 1: vmovupd 816(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 816(<input_2=%rdx),>ee=%ymm2
vmovupd 816(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 640 ]
# asm 1: vmovupd 640(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 640(<input_1=%rsi),>pp=%ymm1
vmovupd 640(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 848 ]
# asm 1: vmovupd 848(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 848(<input_2=%rdx),>ee=%ymm2
vmovupd 848(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 672 ]
# asm 1: vmovupd 672(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 672(<input_1=%rsi),>pp=%ymm1
vmovupd 672(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 880 ]
# asm 1: vmovupd 880(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 880(<input_2=%rdx),>ee=%ymm2
vmovupd 880(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 704 ]
# asm 1: vmovupd 704(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 704(<input_1=%rsi),>pp=%ymm1
vmovupd 704(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 912 ]
# asm 1: vmovupd 912(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 912(<input_2=%rdx),>ee=%ymm2
vmovupd 912(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 736 ]
# asm 1: vmovupd 736(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 736(<input_1=%rsi),>pp=%ymm1
vmovupd 736(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 944 ]
# asm 1: vmovupd 944(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 944(<input_2=%rdx),>ee=%ymm2
vmovupd 944(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: pp = mem256[ input_1 + 768 ]
# asm 1: vmovupd 768(<input_1=int64#2),>pp=reg256#2
# asm 2: vmovupd 768(<input_1=%rsi),>pp=%ymm1
vmovupd 768(%rsi),%ymm1
# qhasm: ee = mem256[ input_2 + 976 ]
# asm 1: vmovupd 976(<input_2=int64#3),>ee=reg256#3
# asm 2: vmovupd 976(<input_2=%rdx),>ee=%ymm2
vmovupd 976(%rdx),%ymm2
# qhasm: pp &= ee
# asm 1: vpand <ee=reg256#3,<pp=reg256#2,<pp=reg256#2
# asm 2: vpand <ee=%ymm2,<pp=%ymm1,<pp=%ymm1
vpand %ymm2,%ymm1,%ymm1
# qhasm: ss ^= pp
# asm 1: vpxor <pp=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <pp=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: buf = ss
# asm 1: vmovapd <ss=reg256#1,>buf=stack256#1
# asm 2: vmovapd <ss=%ymm0,>buf=0(%rsp)
vmovapd %ymm0,0(%rsp)
# qhasm: s = mem64[input_1 + 800]
# asm 1: movq 800(<input_1=int64#2),>s=int64#6
# asm 2: movq 800(<input_1=%rsi),>s=%r9
movq 800(%rsi),%r9
# qhasm: e = mem64[input_2 + 1008]
# asm 1: movq 1008(<input_2=int64#3),>e=int64#7
# asm 2: movq 1008(<input_2=%rdx),>e=%rax
movq 1008(%rdx),%rax
# qhasm: s &= e
# asm 1: and <e=int64#7,<s=int64#6
# asm 2: and <e=%rax,<s=%r9
and %rax,%r9
# qhasm: p = mem64[input_1 + 808]
# asm 1: movq 808(<input_1=int64#2),>p=int64#7
# asm 2: movq 808(<input_1=%rsi),>p=%rax
movq 808(%rsi),%rax
# qhasm: e = mem64[input_2 + 1016]
# asm 1: movq 1016(<input_2=int64#3),>e=int64#8
# asm 2: movq 1016(<input_2=%rdx),>e=%r10
movq 1016(%rdx),%r10
# qhasm: p &= e
# asm 1: and <e=int64#8,<p=int64#7
# asm 2: and <e=%r10,<p=%rax
and %r10,%rax
# qhasm: s ^= p
# asm 1: xor <p=int64#7,<s=int64#6
# asm 2: xor <p=%rax,<s=%r9
xor %rax,%r9
# qhasm: c_all = count(s)
# asm 1: popcnt <s=int64#6, >c_all=int64#6
# asm 2: popcnt <s=%r9, >c_all=%r9
popcnt %r9,%r9
# qhasm: b64 = mem64[ buf_ptr + 0 ]
# asm 1: movq 0(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 0(<buf_ptr=%rcx),>b64=%rax
movq 0(%rcx),%rax
# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax,%rax
# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax,%r9
# qhasm: b64 = mem64[ buf_ptr + 8 ]
# asm 1: movq 8(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 8(<buf_ptr=%rcx),>b64=%rax
movq 8(%rcx),%rax
# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax,%rax
# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax,%r9
# qhasm: b64 = mem64[ buf_ptr + 16 ]
# asm 1: movq 16(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 16(<buf_ptr=%rcx),>b64=%rax
movq 16(%rcx),%rax
# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax,%rax
# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax,%r9
# qhasm: b64 = mem64[ buf_ptr + 24 ]
# asm 1: movq 24(<buf_ptr=int64#4),>b64=int64#7
# asm 2: movq 24(<buf_ptr=%rcx),>b64=%rax
movq 24(%rcx),%rax
# qhasm: c = count(b64)
# asm 1: popcnt <b64=int64#7, >c=int64#7
# asm 2: popcnt <b64=%rax, >c=%rax
popcnt %rax,%rax
# qhasm: c_all ^= c
# asm 1: xor <c=int64#7,<c_all=int64#6
# asm 2: xor <c=%rax,<c_all=%r9
xor %rax,%r9
# qhasm: addr = row
# asm 1: mov <row=int64#5,>addr=int64#7
# asm 2: mov <row=%r8,>addr=%rax
mov %r8,%rax
# qhasm: (uint64) addr >>= 3
# asm 1: shr $3,<addr=int64#7
# asm 2: shr $3,<addr=%rax
shr $3,%rax
# qhasm: addr += input_0
# asm 1: add <input_0=int64#1,<addr=int64#7
# asm 2: add <input_0=%rdi,<addr=%rax
add %rdi,%rax
# qhasm: synd = *(uint8 *) (addr + 0)
# asm 1: movzbq 0(<addr=int64#7),>synd=int64#8
# asm 2: movzbq 0(<addr=%rax),>synd=%r10
movzbq 0(%rax),%r10
# qhasm: synd <<= 1
# asm 1: shl $1,<synd=int64#8
# asm 2: shl $1,<synd=%r10
shl $1,%r10
# qhasm: (uint32) c_all &= 1
# asm 1: and $1,<c_all=int64#6d
# asm 2: and $1,<c_all=%r9d
and $1,%r9d
# qhasm: synd |= c_all
# asm 1: or <c_all=int64#6,<synd=int64#8
# asm 2: or <c_all=%r9,<synd=%r10
or %r9,%r10
# qhasm: *(uint8 *) (addr + 0) = synd
# asm 1: movb <synd=int64#8b,0(<addr=int64#7)
# asm 2: movb <synd=%r10b,0(<addr=%rax)
movb %r10b,0(%rax)
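# note: each output byte collects 8 row parities; shifting synd left
# before inserting puts the bit for row i at bit position i mod 8.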
# qhasm: input_1 -= 816
# asm 1: sub $816,<input_1=int64#2
# asm 2: sub $816,<input_1=%rsi
sub $816,%rsi
# qhasm: =? row-0
# asm 1: cmp $0,<row=int64#5
# asm 2: cmp $0,<row=%r8
cmp $0,%r8
# comment:fp stack unchanged by jump
# qhasm: goto loop if !=
jne ._loop
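# note: after the row loop, the first 208 bytes of the error vector
# (presumably the identity block of the parity-check matrix) are XORed
# directly into the syndrome.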
# qhasm: ss = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 0(<input_0=%rdi),>ss=%ymm0
vmovupd 0(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 0(<input_2=%rdx),>ee=%ymm1
vmovupd 0(%rdx),%ymm1
# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 0 ] = ss
# asm 1: vmovupd <ss=reg256#1,0(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,0(<input_0=%rdi)
vmovupd %ymm0,0(%rdi)
# qhasm: ss = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 32(<input_0=%rdi),>ss=%ymm0
vmovupd 32(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 32 ]
# asm 1: vmovupd 32(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 32(<input_2=%rdx),>ee=%ymm1
vmovupd 32(%rdx),%ymm1
# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 32 ] = ss
# asm 1: vmovupd <ss=reg256#1,32(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,32(<input_0=%rdi)
vmovupd %ymm0,32(%rdi)
# qhasm: ss = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 64(<input_0=%rdi),>ss=%ymm0
vmovupd 64(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 64 ]
# asm 1: vmovupd 64(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 64(<input_2=%rdx),>ee=%ymm1
vmovupd 64(%rdx),%ymm1
# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 64 ] = ss
# asm 1: vmovupd <ss=reg256#1,64(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,64(<input_0=%rdi)
vmovupd %ymm0,64(%rdi)
# qhasm: ss = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 96(<input_0=%rdi),>ss=%ymm0
vmovupd 96(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 96 ]
# asm 1: vmovupd 96(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 96(<input_2=%rdx),>ee=%ymm1
vmovupd 96(%rdx),%ymm1
# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 96 ] = ss
# asm 1: vmovupd <ss=reg256#1,96(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,96(<input_0=%rdi)
vmovupd %ymm0,96(%rdi)
# qhasm: ss = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 128(<input_0=%rdi),>ss=%ymm0
vmovupd 128(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 128 ]
# asm 1: vmovupd 128(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 128(<input_2=%rdx),>ee=%ymm1
vmovupd 128(%rdx),%ymm1
# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 128 ] = ss
# asm 1: vmovupd <ss=reg256#1,128(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,128(<input_0=%rdi)
vmovupd %ymm0,128(%rdi)
# qhasm: ss = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>ss=reg256#1
# asm 2: vmovupd 160(<input_0=%rdi),>ss=%ymm0
vmovupd 160(%rdi),%ymm0
# qhasm: ee = mem256[ input_2 + 160 ]
# asm 1: vmovupd 160(<input_2=int64#3),>ee=reg256#2
# asm 2: vmovupd 160(<input_2=%rdx),>ee=%ymm1
vmovupd 160(%rdx),%ymm1
# qhasm: ss ^= ee
# asm 1: vpxor <ee=reg256#2,<ss=reg256#1,<ss=reg256#1
# asm 2: vpxor <ee=%ymm1,<ss=%ymm0,<ss=%ymm0
vpxor %ymm1,%ymm0,%ymm0
# qhasm: mem256[ input_0 + 160 ] = ss
# asm 1: vmovupd <ss=reg256#1,160(<input_0=int64#1)
# asm 2: vmovupd <ss=%ymm0,160(<input_0=%rdi)
vmovupd %ymm0,160(%rdi)
# qhasm: s = mem64[ input_0 + 192 ]
# asm 1: movq 192(<input_0=int64#1),>s=int64#2
# asm 2: movq 192(<input_0=%rdi),>s=%rsi
movq 192(%rdi),%rsi
# qhasm: e = mem64[ input_2 + 192 ]
# asm 1: movq 192(<input_2=int64#3),>e=int64#4
# asm 2: movq 192(<input_2=%rdx),>e=%rcx
movq 192(%rdx),%rcx
# qhasm: s ^= e
# asm 1: xor <e=int64#4,<s=int64#2
# asm 2: xor <e=%rcx,<s=%rsi
xor %rcx,%rsi
# qhasm: mem64[ input_0 + 192 ] = s
# asm 1: movq <s=int64#2,192(<input_0=int64#1)
# asm 2: movq <s=%rsi,192(<input_0=%rdi)
movq %rsi,192(%rdi)
# qhasm: s = mem64[ input_0 + 200 ]
# asm 1: movq 200(<input_0=int64#1),>s=int64#2
# asm 2: movq 200(<input_0=%rdi),>s=%rsi
movq 200(%rdi),%rsi
# qhasm: e = mem64[ input_2 + 200 ]
# asm 1: movq 200(<input_2=int64#3),>e=int64#3
# asm 2: movq 200(<input_2=%rdx),>e=%rdx
movq 200(%rdx),%rdx
# qhasm: s ^= e
# asm 1: xor <e=int64#3,<s=int64#2
# asm 2: xor <e=%rdx,<s=%rsi
xor %rdx,%rsi
# qhasm: mem64[ input_0 + 200 ] = s
# asm 1: movq <s=int64#2,200(<input_0=int64#1)
# asm 2: movq <s=%rsi,200(<input_0=%rdi)
movq %rsi,200(%rdi)
# qhasm: return
add %r11,%rsp
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/transpose_64x128_sp_asm.S
#include "namespace.h"
#define MASK0_0 CRYPTO_NAMESPACE(MASK0_0)
#define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0)
#define MASK0_1 CRYPTO_NAMESPACE(MASK0_1)
#define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1)
#define MASK1_0 CRYPTO_NAMESPACE(MASK1_0)
#define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0)
#define MASK1_1 CRYPTO_NAMESPACE(MASK1_1)
#define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1)
#define MASK2_0 CRYPTO_NAMESPACE(MASK2_0)
#define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0)
#define MASK2_1 CRYPTO_NAMESPACE(MASK2_1)
#define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1)
#define MASK3_0 CRYPTO_NAMESPACE(MASK3_0)
#define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0)
#define MASK3_1 CRYPTO_NAMESPACE(MASK3_1)
#define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1)
#define MASK4_0 CRYPTO_NAMESPACE(MASK4_0)
#define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0)
#define MASK4_1 CRYPTO_NAMESPACE(MASK4_1)
#define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1)
#define MASK5_0 CRYPTO_NAMESPACE(MASK5_0)
#define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0)
#define MASK5_1 CRYPTO_NAMESPACE(MASK5_1)
#define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1)
#define transpose_64x128_sp_asm CRYPTO_NAMESPACE(transpose_64x128_sp_asm)
#define _transpose_64x128_sp_asm _CRYPTO_NAMESPACE(transpose_64x128_sp_asm)
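# note: this routine appears to transpose a 64x128-bit matrix in place
# via a butterfly network: six stages of masked shift/OR exchanges with
# strides 32, 16, 8, 4, 2 and 1, driven by the MASKi_0/MASKi_1 pairs.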
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg128 x0
# qhasm: reg128 x1
# qhasm: reg128 x2
# qhasm: reg128 x3
# qhasm: reg128 x4
# qhasm: reg128 x5
# qhasm: reg128 x6
# qhasm: reg128 x7
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 v00
# qhasm: reg128 v01
# qhasm: reg128 v10
# qhasm: reg128 v11
# qhasm: reg128 mask0
# qhasm: reg128 mask1
# qhasm: reg128 mask2
# qhasm: reg128 mask3
# qhasm: reg128 mask4
# qhasm: reg128 mask5
# qhasm: enter transpose_64x128_sp_asm
.p2align 5
.global _transpose_64x128_sp_asm
.global transpose_64x128_sp_asm
_transpose_64x128_sp_asm:
transpose_64x128_sp_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: mask0 aligned= mem128[ MASK5_0 ]
# asm 1: movdqa MASK5_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK5_0(%rip),>mask0=%xmm0
movdqa MASK5_0( % rip), % xmm0
# qhasm: mask1 aligned= mem128[ MASK5_1 ]
# asm 1: movdqa MASK5_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK5_1(%rip),>mask1=%xmm1
movdqa MASK5_1( % rip), % xmm1
# qhasm: mask2 aligned= mem128[ MASK4_0 ]
# asm 1: movdqa MASK4_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK4_0(%rip),>mask2=%xmm2
movdqa MASK4_0( % rip), % xmm2
# qhasm: mask3 aligned= mem128[ MASK4_1 ]
# asm 1: movdqa MASK4_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK4_1(%rip),>mask3=%xmm3
movdqa MASK4_1( % rip), % xmm3
# qhasm: mask4 aligned= mem128[ MASK3_0 ]
# asm 1: movdqa MASK3_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK3_0(%rip),>mask4=%xmm4
movdqa MASK3_0( % rip), % xmm4
# qhasm: mask5 aligned= mem128[ MASK3_1 ]
# asm 1: movdqa MASK3_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK3_1(%rip),>mask5=%xmm5
movdqa MASK3_1( % rip), % xmm5
# qhasm: x0 = mem128[ input_0 + 0 ]
# asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6
movdqu 0( % rdi), % xmm6
# qhasm: x1 = mem128[ input_0 + 128 ]
# asm 1: movdqu 128(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 128(<input_0=%rdi),>x1=%xmm7
movdqu 128( % rdi), % xmm7
# qhasm: x2 = mem128[ input_0 + 256 ]
# asm 1: movdqu 256(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 256(<input_0=%rdi),>x2=%xmm8
movdqu 256( % rdi), % xmm8
# qhasm: x3 = mem128[ input_0 + 384 ]
# asm 1: movdqu 384(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 384(<input_0=%rdi),>x3=%xmm9
movdqu 384( % rdi), % xmm9
# qhasm: x4 = mem128[ input_0 + 512 ]
# asm 1: movdqu 512(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 512(<input_0=%rdi),>x4=%xmm10
movdqu 512(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 640 ]
# asm 1: movdqu 640(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 640(<input_0=%rdi),>x5=%xmm11
movdqu 640(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 768 ]
# asm 1: movdqu 768(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 768(<input_0=%rdi),>x6=%xmm12
movdqu 768(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 896 ]
# asm 1: movdqu 896(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 896(<input_0=%rdi),>x7=%xmm13
movdqu 896(%rdi),%xmm13
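# Pass 1 of 3: swap the 32-bit halves of each 64-bit lane between x_k and
# x_{k+4}, using mask0/mask1 (MASK5_0/MASK5_1).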
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9
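# Pass 2 of 3: swap the 16-bit halves of each 32-bit group between x_k and
# x_{k+2}, using mask2/mask3 (MASK4_0/MASK4_1).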
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7
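# Pass 3 of 3: swap the 8-bit halves of each 16-bit group between x_k and
# x_{k+1}, using mask4/mask5 (MASK3_0/MASK3_1).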
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 0 ] = x0
# asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi)
movdqu %xmm9,0(%rdi)
# qhasm: mem128[ input_0 + 128 ] = x1
# asm 1: movdqu <x1=reg128#14,128(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,128(<input_0=%rdi)
movdqu %xmm13,128(%rdi)
# qhasm: mem128[ input_0 + 256 ] = x2
# asm 1: movdqu <x2=reg128#15,256(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,256(<input_0=%rdi)
movdqu %xmm14,256(%rdi)
# qhasm: mem128[ input_0 + 384 ] = x3
# asm 1: movdqu <x3=reg128#11,384(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,384(<input_0=%rdi)
movdqu %xmm10,384(%rdi)
# qhasm: mem128[ input_0 + 512 ] = x4
# asm 1: movdqu <x4=reg128#12,512(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,512(<input_0=%rdi)
movdqu %xmm11,512(%rdi)
# qhasm: mem128[ input_0 + 640 ] = x5
# asm 1: movdqu <x5=reg128#9,640(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,640(<input_0=%rdi)
movdqu %xmm8,640(%rdi)
# qhasm: mem128[ input_0 + 768 ] = x6
# asm 1: movdqu <x6=reg128#13,768(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,768(<input_0=%rdi)
movdqu %xmm12,768(%rdi)
# qhasm: mem128[ input_0 + 896 ] = x7
# asm 1: movdqu <x7=reg128#7,896(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,896(<input_0=%rdi)
movdqu %xmm6,896(%rdi)
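# The same load / three-pass swap / store sequence now repeats for each
# subsequent 16-byte column slice (offsets 16, 32, 48, 64, ...).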
# qhasm: x0 = mem128[ input_0 + 16 ]
# asm 1: movdqu 16(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 16(<input_0=%rdi),>x0=%xmm6
movdqu 16(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 144 ]
# asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7
movdqu 144(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 272 ]
# asm 1: movdqu 272(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 272(<input_0=%rdi),>x2=%xmm8
movdqu 272(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 400 ]
# asm 1: movdqu 400(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 400(<input_0=%rdi),>x3=%xmm9
movdqu 400(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 528 ]
# asm 1: movdqu 528(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 528(<input_0=%rdi),>x4=%xmm10
movdqu 528(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 656 ]
# asm 1: movdqu 656(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 656(<input_0=%rdi),>x5=%xmm11
movdqu 656(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 784 ]
# asm 1: movdqu 784(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 784(<input_0=%rdi),>x6=%xmm12
movdqu 784(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 912 ]
# asm 1: movdqu 912(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 912(<input_0=%rdi),>x7=%xmm13
movdqu 912(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 16 ] = x0
# asm 1: movdqu <x0=reg128#10,16(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,16(<input_0=%rdi)
movdqu %xmm9,16(%rdi)
# qhasm: mem128[ input_0 + 144 ] = x1
# asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi)
movdqu %xmm13,144(%rdi)
# qhasm: mem128[ input_0 + 272 ] = x2
# asm 1: movdqu <x2=reg128#15,272(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,272(<input_0=%rdi)
movdqu %xmm14,272(%rdi)
# qhasm: mem128[ input_0 + 400 ] = x3
# asm 1: movdqu <x3=reg128#11,400(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,400(<input_0=%rdi)
movdqu %xmm10,400(%rdi)
# qhasm: mem128[ input_0 + 528 ] = x4
# asm 1: movdqu <x4=reg128#12,528(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,528(<input_0=%rdi)
movdqu %xmm11,528(%rdi)
# qhasm: mem128[ input_0 + 656 ] = x5
# asm 1: movdqu <x5=reg128#9,656(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,656(<input_0=%rdi)
movdqu %xmm8,656(%rdi)
# qhasm: mem128[ input_0 + 784 ] = x6
# asm 1: movdqu <x6=reg128#13,784(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,784(<input_0=%rdi)
movdqu %xmm12,784(%rdi)
# qhasm: mem128[ input_0 + 912 ] = x7
# asm 1: movdqu <x7=reg128#7,912(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,912(<input_0=%rdi)
movdqu %xmm6,912(%rdi)
# qhasm: x0 = mem128[ input_0 + 32 ]
# asm 1: movdqu 32(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 32(<input_0=%rdi),>x0=%xmm6
movdqu 32(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 160 ]
# asm 1: movdqu 160(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 160(<input_0=%rdi),>x1=%xmm7
movdqu 160(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 288 ]
# asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8
movdqu 288(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 416 ]
# asm 1: movdqu 416(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 416(<input_0=%rdi),>x3=%xmm9
movdqu 416(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 544 ]
# asm 1: movdqu 544(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 544(<input_0=%rdi),>x4=%xmm10
movdqu 544(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 672 ]
# asm 1: movdqu 672(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 672(<input_0=%rdi),>x5=%xmm11
movdqu 672(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 800 ]
# asm 1: movdqu 800(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 800(<input_0=%rdi),>x6=%xmm12
movdqu 800(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 928 ]
# asm 1: movdqu 928(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 928(<input_0=%rdi),>x7=%xmm13
movdqu 928(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 32 ] = x0
# asm 1: movdqu <x0=reg128#10,32(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,32(<input_0=%rdi)
movdqu %xmm9,32(%rdi)
# qhasm: mem128[ input_0 + 160 ] = x1
# asm 1: movdqu <x1=reg128#14,160(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,160(<input_0=%rdi)
movdqu %xmm13,160(%rdi)
# qhasm: mem128[ input_0 + 288 ] = x2
# asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi)
movdqu %xmm14,288(%rdi)
# qhasm: mem128[ input_0 + 416 ] = x3
# asm 1: movdqu <x3=reg128#11,416(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,416(<input_0=%rdi)
movdqu %xmm10,416(%rdi)
# qhasm: mem128[ input_0 + 544 ] = x4
# asm 1: movdqu <x4=reg128#12,544(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,544(<input_0=%rdi)
movdqu %xmm11,544(%rdi)
# qhasm: mem128[ input_0 + 672 ] = x5
# asm 1: movdqu <x5=reg128#9,672(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,672(<input_0=%rdi)
movdqu %xmm8,672(%rdi)
# qhasm: mem128[ input_0 + 800 ] = x6
# asm 1: movdqu <x6=reg128#13,800(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,800(<input_0=%rdi)
movdqu %xmm12,800(%rdi)
# qhasm: mem128[ input_0 + 928 ] = x7
# asm 1: movdqu <x7=reg128#7,928(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,928(<input_0=%rdi)
movdqu %xmm6,928(%rdi)
# qhasm: x0 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 48(<input_0=%rdi),>x0=%xmm6
movdqu 48(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 176 ]
# asm 1: movdqu 176(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 176(<input_0=%rdi),>x1=%xmm7
movdqu 176(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 304 ]
# asm 1: movdqu 304(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 304(<input_0=%rdi),>x2=%xmm8
movdqu 304(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 432 ]
# asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9
movdqu 432(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 560 ]
# asm 1: movdqu 560(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 560(<input_0=%rdi),>x4=%xmm10
movdqu 560(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 688 ]
# asm 1: movdqu 688(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 688(<input_0=%rdi),>x5=%xmm11
movdqu 688(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 816 ]
# asm 1: movdqu 816(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 816(<input_0=%rdi),>x6=%xmm12
movdqu 816(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 944 ]
# asm 1: movdqu 944(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 944(<input_0=%rdi),>x7=%xmm13
movdqu 944(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 48 ] = x0
# asm 1: movdqu <x0=reg128#10,48(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,48(<input_0=%rdi)
movdqu %xmm9,48(%rdi)
# qhasm: mem128[ input_0 + 176 ] = x1
# asm 1: movdqu <x1=reg128#14,176(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,176(<input_0=%rdi)
movdqu %xmm13,176(%rdi)
# qhasm: mem128[ input_0 + 304 ] = x2
# asm 1: movdqu <x2=reg128#15,304(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,304(<input_0=%rdi)
movdqu %xmm14,304(%rdi)
# qhasm: mem128[ input_0 + 432 ] = x3
# asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi)
movdqu %xmm10,432(%rdi)
# qhasm: mem128[ input_0 + 560 ] = x4
# asm 1: movdqu <x4=reg128#12,560(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,560(<input_0=%rdi)
movdqu %xmm11,560(%rdi)
# qhasm: mem128[ input_0 + 688 ] = x5
# asm 1: movdqu <x5=reg128#9,688(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,688(<input_0=%rdi)
movdqu %xmm8,688(%rdi)
# qhasm: mem128[ input_0 + 816 ] = x6
# asm 1: movdqu <x6=reg128#13,816(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,816(<input_0=%rdi)
movdqu %xmm12,816(%rdi)
# qhasm: mem128[ input_0 + 944 ] = x7
# asm 1: movdqu <x7=reg128#7,944(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,944(<input_0=%rdi)
movdqu %xmm6,944(%rdi)
# qhasm: x0 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 64(<input_0=%rdi),>x0=%xmm6
movdqu 64(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 192 ]
# asm 1: movdqu 192(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 192(<input_0=%rdi),>x1=%xmm7
movdqu 192(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 320 ]
# asm 1: movdqu 320(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 320(<input_0=%rdi),>x2=%xmm8
movdqu 320(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 448 ]
# asm 1: movdqu 448(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 448(<input_0=%rdi),>x3=%xmm9
movdqu 448(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 576 ]
# asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10
movdqu 576(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 704 ]
# asm 1: movdqu 704(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 704(<input_0=%rdi),>x5=%xmm11
movdqu 704(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 832 ]
# asm 1: movdqu 832(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 832(<input_0=%rdi),>x6=%xmm12
movdqu 832(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 960 ]
# asm 1: movdqu 960(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 960(<input_0=%rdi),>x7=%xmm13
movdqu 960(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 64 ] = x0
# asm 1: movdqu <x0=reg128#10,64(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,64(<input_0=%rdi)
movdqu %xmm9,64(%rdi)
# qhasm: mem128[ input_0 + 192 ] = x1
# asm 1: movdqu <x1=reg128#14,192(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,192(<input_0=%rdi)
movdqu %xmm13,192(%rdi)
# qhasm: mem128[ input_0 + 320 ] = x2
# asm 1: movdqu <x2=reg128#15,320(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,320(<input_0=%rdi)
movdqu %xmm14,320(%rdi)
# qhasm: mem128[ input_0 + 448 ] = x3
# asm 1: movdqu <x3=reg128#11,448(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,448(<input_0=%rdi)
movdqu %xmm10,448(%rdi)
# qhasm: mem128[ input_0 + 576 ] = x4
# asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi)
movdqu %xmm11,576(%rdi)
# qhasm: mem128[ input_0 + 704 ] = x5
# asm 1: movdqu <x5=reg128#9,704(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,704(<input_0=%rdi)
movdqu %xmm8,704(%rdi)
# qhasm: mem128[ input_0 + 832 ] = x6
# asm 1: movdqu <x6=reg128#13,832(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,832(<input_0=%rdi)
movdqu %xmm12,832(%rdi)
# qhasm: mem128[ input_0 + 960 ] = x7
# asm 1: movdqu <x7=reg128#7,960(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,960(<input_0=%rdi)
movdqu %xmm6,960(%rdi)
# qhasm: x0 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 80(<input_0=%rdi),>x0=%xmm6
movdqu 80(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 208(<input_0=%rdi),>x1=%xmm7
movdqu 208(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 336(<input_0=%rdi),>x2=%xmm8
movdqu 336(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 464 ]
# asm 1: movdqu 464(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 464(<input_0=%rdi),>x3=%xmm9
movdqu 464(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 592 ]
# asm 1: movdqu 592(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 592(<input_0=%rdi),>x4=%xmm10
movdqu 592(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 720 ]
# asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11
movdqu 720(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 848 ]
# asm 1: movdqu 848(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 848(<input_0=%rdi),>x6=%xmm12
movdqu 848(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 976 ]
# asm 1: movdqu 976(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 976(<input_0=%rdi),>x7=%xmm13
movdqu 976(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 80 ] = x0
# asm 1: movdqu <x0=reg128#10,80(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,80(<input_0=%rdi)
movdqu %xmm9,80(%rdi)
# qhasm: mem128[ input_0 + 208 ] = x1
# asm 1: movdqu <x1=reg128#14,208(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,208(<input_0=%rdi)
movdqu %xmm13,208(%rdi)
# qhasm: mem128[ input_0 + 336 ] = x2
# asm 1: movdqu <x2=reg128#15,336(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,336(<input_0=%rdi)
movdqu %xmm14,336(%rdi)
# qhasm: mem128[ input_0 + 464 ] = x3
# asm 1: movdqu <x3=reg128#11,464(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,464(<input_0=%rdi)
movdqu %xmm10,464(%rdi)
# qhasm: mem128[ input_0 + 592 ] = x4
# asm 1: movdqu <x4=reg128#12,592(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,592(<input_0=%rdi)
movdqu %xmm11,592(%rdi)
# qhasm: mem128[ input_0 + 720 ] = x5
# asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi)
movdqu %xmm8,720(%rdi)
# qhasm: mem128[ input_0 + 848 ] = x6
# asm 1: movdqu <x6=reg128#13,848(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,848(<input_0=%rdi)
movdqu %xmm12,848(%rdi)
# qhasm: mem128[ input_0 + 976 ] = x7
# asm 1: movdqu <x7=reg128#7,976(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,976(<input_0=%rdi)
movdqu %xmm6,976(%rdi)
# qhasm: x0 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 96(<input_0=%rdi),>x0=%xmm6
movdqu 96(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 224(<input_0=%rdi),>x1=%xmm7
movdqu 224(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 352(<input_0=%rdi),>x2=%xmm8
movdqu 352(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 480 ]
# asm 1: movdqu 480(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 480(<input_0=%rdi),>x3=%xmm9
movdqu 480(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 608 ]
# asm 1: movdqu 608(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 608(<input_0=%rdi),>x4=%xmm10
movdqu 608(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 736 ]
# asm 1: movdqu 736(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 736(<input_0=%rdi),>x5=%xmm11
movdqu 736(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 864 ]
# asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12
movdqu 864(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 992 ]
# asm 1: movdqu 992(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 992(<input_0=%rdi),>x7=%xmm13
movdqu 992(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13,%xmm9,%xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15,%xmm13,%xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11,%xmm14,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<x3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12,%xmm10,%xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<x7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15,%xmm8,%xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9,%xmm7,%xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<x1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<x0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15,%xmm9,%xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14,%xmm13,%xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<x3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10,%xmm11,%xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<x5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<x4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8,%xmm12,%xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<x7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15,%xmm12,%xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7,%xmm6,%xmm6
# qhasm: mem128[ input_0 + 96 ] = x0
# asm 1: movdqu <x0=reg128#10,96(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,96(<input_0=%rdi)
movdqu %xmm9,96(%rdi)
# qhasm: mem128[ input_0 + 224 ] = x1
# asm 1: movdqu <x1=reg128#14,224(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,224(<input_0=%rdi)
movdqu %xmm13,224(%rdi)
# qhasm: mem128[ input_0 + 352 ] = x2
# asm 1: movdqu <x2=reg128#15,352(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,352(<input_0=%rdi)
movdqu %xmm14,352(%rdi)
# qhasm: mem128[ input_0 + 480 ] = x3
# asm 1: movdqu <x3=reg128#11,480(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,480(<input_0=%rdi)
movdqu %xmm10,480(%rdi)
# qhasm: mem128[ input_0 + 608 ] = x4
# asm 1: movdqu <x4=reg128#12,608(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,608(<input_0=%rdi)
movdqu %xmm11,608(%rdi)
# qhasm: mem128[ input_0 + 736 ] = x5
# asm 1: movdqu <x5=reg128#9,736(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,736(<input_0=%rdi)
movdqu %xmm8,736(%rdi)
# qhasm: mem128[ input_0 + 864 ] = x6
# asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi)
movdqu %xmm12,864(%rdi)
# qhasm: mem128[ input_0 + 992 ] = x7
# asm 1: movdqu <x7=reg128#7,992(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,992(<input_0=%rdi)
movdqu %xmm6,992(%rdi)
# qhasm: x0 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 112(<input_0=%rdi),>x0=%xmm6
movdqu 112(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 240 ]
# asm 1: movdqu 240(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 240(<input_0=%rdi),>x1=%xmm7
movdqu 240(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 368 ]
# asm 1: movdqu 368(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 368(<input_0=%rdi),>x2=%xmm8
movdqu 368(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 496 ]
# asm 1: movdqu 496(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 496(<input_0=%rdi),>x3=%xmm9
movdqu 496(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 624 ]
# asm 1: movdqu 624(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 624(<input_0=%rdi),>x4=%xmm10
movdqu 624(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 752 ]
# asm 1: movdqu 752(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 752(<input_0=%rdi),>x5=%xmm11
movdqu 752(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 880 ]
# asm 1: movdqu 880(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 880(<input_0=%rdi),>x6=%xmm12
movdqu 880(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 1008 ]
# asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13
movdqu 1008(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: 2x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<x4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15
# qhasm: 2x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<x0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15,%xmm14,%xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10,%xmm6,%xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10
# qhasm: 2x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<x5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15
# qhasm: 2x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<x1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15,%xmm10,%xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11,%xmm7,%xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11
# qhasm: 2x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<x6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15
# qhasm: 2x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<x2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15,%xmm11,%xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12,%xmm8,%xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#1
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm0
vpand %xmm0,%xmm9,%xmm0
# qhasm: 2x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg128#14,>v10=reg128#13
# asm 2: vpsllq $32,<x7=%xmm13,>v10=%xmm12
vpsllq $32,%xmm13,%xmm12
# qhasm: 2x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<x3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1
vpand %xmm1,%xmm13,%xmm1
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#1,>x3=reg128#1
# asm 2: vpor <v10=%xmm12,<v00=%xmm0,>x3=%xmm0
vpor %xmm12,%xmm0,%xmm0
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1
vpor %xmm1,%xmm9,%xmm1
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9
vpand %xmm2,%xmm14,%xmm9
# qhasm: 4x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg128#12,>v10=reg128#13
# asm 2: vpslld $16,<x2=%xmm11,>v10=%xmm12
vpslld $16,%xmm11,%xmm12
# qhasm: 4x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg128#15,>v01=reg128#14
# asm 2: vpsrld $16,<x0=%xmm14,>v01=%xmm13
vpsrld $16,%xmm14,%xmm13
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9
vpor %xmm12,%xmm9,%xmm9
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11
vpor %xmm11,%xmm13,%xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12
vpand %xmm2,%xmm10,%xmm12
# qhasm: 4x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg128#1,>v10=reg128#14
# asm 2: vpslld $16,<x3=%xmm0,>v10=%xmm13
vpslld $16,%xmm0,%xmm13
# qhasm: 4x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<x1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0
vpand %xmm3,%xmm0,%xmm0
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12
vpor %xmm13,%xmm12,%xmm12
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0
vpor %xmm0,%xmm10,%xmm0
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10
vpand %xmm2,%xmm6,%xmm10
# qhasm: 4x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg128#9,>v10=reg128#14
# asm 2: vpslld $16,<x6=%xmm8,>v10=%xmm13
vpslld $16,%xmm8,%xmm13
# qhasm: 4x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<x4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10
vpor %xmm13,%xmm10,%xmm10
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8,%xmm6,%xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#3
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm2
vpand %xmm2,%xmm7,%xmm2
# qhasm: 4x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg128#2,>v10=reg128#9
# asm 2: vpslld $16,<x7=%xmm1,>v10=%xmm8
vpslld $16,%xmm1,%xmm8
# qhasm: 4x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<x5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1
vpand %xmm3,%xmm1,%xmm1
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#9,<v00=reg128#3,>x5=reg128#3
# asm 2: vpor <v10=%xmm8,<v00=%xmm2,>x5=%xmm2
vpor %xmm8,%xmm2,%xmm2
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1
vpor %xmm1,%xmm7,%xmm1
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3
vpand %xmm4,%xmm9,%xmm3
# qhasm: 8x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg128#13,>v10=reg128#8
# asm 2: vpsllw $8,<x1=%xmm12,>v10=%xmm7
vpsllw $8,%xmm12,%xmm7
# qhasm: 8x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg128#10,>v01=reg128#9
# asm 2: vpsrlw $8,<x0=%xmm9,>v01=%xmm8
vpsrlw $8,%xmm9,%xmm8
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9
vpand %xmm5,%xmm12,%xmm9
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3
vpor %xmm7,%xmm3,%xmm3
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7
vpor %xmm9,%xmm8,%xmm7
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8
vpand %xmm4,%xmm11,%xmm8
# qhasm: 8x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg128#1,>v10=reg128#10
# asm 2: vpsllw $8,<x3=%xmm0,>v10=%xmm9
vpsllw $8,%xmm0,%xmm9
# qhasm: 8x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<x2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0
vpand %xmm5,%xmm0,%xmm0
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8
vpor %xmm9,%xmm8,%xmm8
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0
vpor %xmm0,%xmm11,%xmm0
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9
vpand %xmm4,%xmm10,%xmm9
# qhasm: 8x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg128#3,>v10=reg128#12
# asm 2: vpsllw $8,<x5=%xmm2,>v10=%xmm11
vpsllw $8,%xmm2,%xmm11
# qhasm: 8x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg128#11,>v01=reg128#11
# asm 2: vpsrlw $8,<x4=%xmm10,>v01=%xmm10
vpsrlw $8,%xmm10,%xmm10
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2
vpand %xmm5,%xmm2,%xmm2
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9
vpor %xmm11,%xmm9,%xmm9
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2
vpor %xmm2,%xmm10,%xmm2
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#5
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm4
vpand %xmm4,%xmm6,%xmm4
# qhasm: 8x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg128#2,>v10=reg128#11
# asm 2: vpsllw $8,<x7=%xmm1,>v10=%xmm10
vpsllw $8,%xmm1,%xmm10
# qhasm: 8x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<x6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1
vpand %xmm5,%xmm1,%xmm1
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#11,<v00=reg128#5,>x6=reg128#5
# asm 2: vpor <v10=%xmm10,<v00=%xmm4,>x6=%xmm4
vpor %xmm10,%xmm4,%xmm4
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor %xmm1,%xmm6,%xmm1
# qhasm: mem128[ input_0 + 112 ] = x0
# asm 1: movdqu <x0=reg128#4,112(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,112(<input_0=%rdi)
movdqu %xmm3,112(%rdi)
# qhasm: mem128[ input_0 + 240 ] = x1
# asm 1: movdqu <x1=reg128#8,240(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,240(<input_0=%rdi)
movdqu %xmm7,240(%rdi)
# qhasm: mem128[ input_0 + 368 ] = x2
# asm 1: movdqu <x2=reg128#9,368(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,368(<input_0=%rdi)
movdqu %xmm8,368(%rdi)
# qhasm: mem128[ input_0 + 496 ] = x3
# asm 1: movdqu <x3=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,496(<input_0=%rdi)
movdqu %xmm0,496(%rdi)
# qhasm: mem128[ input_0 + 624 ] = x4
# asm 1: movdqu <x4=reg128#10,624(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,624(<input_0=%rdi)
movdqu %xmm9,624(%rdi)
# qhasm: mem128[ input_0 + 752 ] = x5
# asm 1: movdqu <x5=reg128#3,752(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,752(<input_0=%rdi)
movdqu %xmm2,752(%rdi)
# qhasm: mem128[ input_0 + 880 ] = x6
# asm 1: movdqu <x6=reg128#5,880(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,880(<input_0=%rdi)
movdqu %xmm4,880(%rdi)
# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu %xmm1,1008(%rdi)
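# With all eight row groups written back, a new mask set is loaded next
# (MASK2_*, MASK1_*, MASK0_*), presumably the 4-/2-/1-bit interleave masks
# for the finer-grained swap stages; the following pass works on the first
# 128 bytes in place.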
# qhasm: mask0 aligned= mem128[ MASK2_0 ]
# asm 1: movdqa MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa MASK2_0(%rip),>mask0=%xmm0
movdqa MASK2_0(%rip),%xmm0
# qhasm: mask1 aligned= mem128[ MASK2_1 ]
# asm 1: movdqa MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa MASK2_1(%rip),>mask1=%xmm1
movdqa MASK2_1(%rip),%xmm1
# qhasm: mask2 aligned= mem128[ MASK1_0 ]
# asm 1: movdqa MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa MASK1_0(%rip),>mask2=%xmm2
movdqa MASK1_0(%rip),%xmm2
# qhasm: mask3 aligned= mem128[ MASK1_1 ]
# asm 1: movdqa MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa MASK1_1(%rip),>mask3=%xmm3
movdqa MASK1_1(%rip),%xmm3
# qhasm: mask4 aligned= mem128[ MASK0_0 ]
# asm 1: movdqa MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa MASK0_0(%rip),>mask4=%xmm4
movdqa MASK0_0(%rip),%xmm4
# qhasm: mask5 aligned= mem128[ MASK0_1 ]
# asm 1: movdqa MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa MASK0_1(%rip),>mask5=%xmm5
movdqa MASK0_1(%rip),%xmm5
# qhasm: x0 = mem128[ input_0 + 0 ]
# asm 1: movdqu 0(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 0(<input_0=%rdi),>x0=%xmm6
movdqu 0(%rdi),%xmm6
# qhasm: x1 = mem128[ input_0 + 16 ]
# asm 1: movdqu 16(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 16(<input_0=%rdi),>x1=%xmm7
movdqu 16(%rdi),%xmm7
# qhasm: x2 = mem128[ input_0 + 32 ]
# asm 1: movdqu 32(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 32(<input_0=%rdi),>x2=%xmm8
movdqu 32(%rdi),%xmm8
# qhasm: x3 = mem128[ input_0 + 48 ]
# asm 1: movdqu 48(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 48(<input_0=%rdi),>x3=%xmm9
movdqu 48(%rdi),%xmm9
# qhasm: x4 = mem128[ input_0 + 64 ]
# asm 1: movdqu 64(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 64(<input_0=%rdi),>x4=%xmm10
movdqu 64(%rdi),%xmm10
# qhasm: x5 = mem128[ input_0 + 80 ]
# asm 1: movdqu 80(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 80(<input_0=%rdi),>x5=%xmm11
movdqu 80(%rdi),%xmm11
# qhasm: x6 = mem128[ input_0 + 96 ]
# asm 1: movdqu 96(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 96(<input_0=%rdi),>x6=%xmm12
movdqu 96(%rdi),%xmm12
# qhasm: x7 = mem128[ input_0 + 112 ]
# asm 1: movdqu 112(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 112(<input_0=%rdi),>x7=%xmm13
movdqu 112(%rdi),%xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9
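# Annotation: s=4 layer complete; the nibble fields of (x0,x4), (x1,x5),
# (x2,x6) and (x3,x7) have been exchanged. The s=2 layer over (x0,x2),
# (x1,x3), (x4,x6), (x5,x7) follows, using mask2/mask3.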
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7
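# Annotation: s=2 layer complete. The final s=1 layer over (x0,x1), (x2,x3),
# (x4,x5), (x6,x7) follows, using mask4/mask5.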
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 0 ] = x0
# asm 1: movdqu <x0=reg128#10,0(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,0(<input_0=%rdi)
movdqu %xmm9, 0(%rdi)
# qhasm: mem128[ input_0 + 16 ] = x1
# asm 1: movdqu <x1=reg128#14,16(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,16(<input_0=%rdi)
movdqu %xmm13, 16(%rdi)
# qhasm: mem128[ input_0 + 32 ] = x2
# asm 1: movdqu <x2=reg128#15,32(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,32(<input_0=%rdi)
movdqu %xmm14, 32(%rdi)
# qhasm: mem128[ input_0 + 48 ] = x3
# asm 1: movdqu <x3=reg128#11,48(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,48(<input_0=%rdi)
movdqu %xmm10, 48(%rdi)
# qhasm: mem128[ input_0 + 64 ] = x4
# asm 1: movdqu <x4=reg128#12,64(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,64(<input_0=%rdi)
movdqu %xmm11, 64(%rdi)
# qhasm: mem128[ input_0 + 80 ] = x5
# asm 1: movdqu <x5=reg128#9,80(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,80(<input_0=%rdi)
movdqu %xmm8, 80(%rdi)
# qhasm: mem128[ input_0 + 96 ] = x6
# asm 1: movdqu <x6=reg128#13,96(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,96(<input_0=%rdi)
movdqu %xmm12, 96(%rdi)
# qhasm: mem128[ input_0 + 112 ] = x7
# asm 1: movdqu <x7=reg128#7,112(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,112(<input_0=%rdi)
movdqu %xmm6, 112(%rdi)
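# Annotation: first 128-byte block done. The identical three-layer pass now
# repeats for the blocks at input_0 + 128, + 256 and + 384 below, so the
# annotations are not repeated there.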
# qhasm: x0 = mem128[ input_0 + 128 ]
# asm 1: movdqu 128(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 128(<input_0=%rdi),>x0=%xmm6
movdqu 128(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 144 ]
# asm 1: movdqu 144(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 144(<input_0=%rdi),>x1=%xmm7
movdqu 144(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 160 ]
# asm 1: movdqu 160(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 160(<input_0=%rdi),>x2=%xmm8
movdqu 160(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 176 ]
# asm 1: movdqu 176(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 176(<input_0=%rdi),>x3=%xmm9
movdqu 176(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 192 ]
# asm 1: movdqu 192(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 192(<input_0=%rdi),>x4=%xmm10
movdqu 192(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 208 ]
# asm 1: movdqu 208(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 208(<input_0=%rdi),>x5=%xmm11
movdqu 208(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 224 ]
# asm 1: movdqu 224(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 224(<input_0=%rdi),>x6=%xmm12
movdqu 224(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 240 ]
# asm 1: movdqu 240(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 240(<input_0=%rdi),>x7=%xmm13
movdqu 240(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 128 ] = x0
# asm 1: movdqu <x0=reg128#10,128(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,128(<input_0=%rdi)
movdqu %xmm9, 128(%rdi)
# qhasm: mem128[ input_0 + 144 ] = x1
# asm 1: movdqu <x1=reg128#14,144(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,144(<input_0=%rdi)
movdqu %xmm13, 144(%rdi)
# qhasm: mem128[ input_0 + 160 ] = x2
# asm 1: movdqu <x2=reg128#15,160(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,160(<input_0=%rdi)
movdqu %xmm14, 160(%rdi)
# qhasm: mem128[ input_0 + 176 ] = x3
# asm 1: movdqu <x3=reg128#11,176(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,176(<input_0=%rdi)
movdqu %xmm10, 176(%rdi)
# qhasm: mem128[ input_0 + 192 ] = x4
# asm 1: movdqu <x4=reg128#12,192(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,192(<input_0=%rdi)
movdqu %xmm11, 192(%rdi)
# qhasm: mem128[ input_0 + 208 ] = x5
# asm 1: movdqu <x5=reg128#9,208(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,208(<input_0=%rdi)
movdqu %xmm8, 208(%rdi)
# qhasm: mem128[ input_0 + 224 ] = x6
# asm 1: movdqu <x6=reg128#13,224(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,224(<input_0=%rdi)
movdqu %xmm12, 224(%rdi)
# qhasm: mem128[ input_0 + 240 ] = x7
# asm 1: movdqu <x7=reg128#7,240(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,240(<input_0=%rdi)
movdqu %xmm6, 240(%rdi)
# qhasm: x0 = mem128[ input_0 + 256 ]
# asm 1: movdqu 256(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 256(<input_0=%rdi),>x0=%xmm6
movdqu 256(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 272 ]
# asm 1: movdqu 272(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 272(<input_0=%rdi),>x1=%xmm7
movdqu 272(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 288 ]
# asm 1: movdqu 288(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 288(<input_0=%rdi),>x2=%xmm8
movdqu 288(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 304 ]
# asm 1: movdqu 304(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 304(<input_0=%rdi),>x3=%xmm9
movdqu 304(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 320 ]
# asm 1: movdqu 320(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 320(<input_0=%rdi),>x4=%xmm10
movdqu 320(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 336 ]
# asm 1: movdqu 336(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 336(<input_0=%rdi),>x5=%xmm11
movdqu 336(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 352 ]
# asm 1: movdqu 352(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 352(<input_0=%rdi),>x6=%xmm12
movdqu 352(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 368 ]
# asm 1: movdqu 368(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 368(<input_0=%rdi),>x7=%xmm13
movdqu 368(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 256 ] = x0
# asm 1: movdqu <x0=reg128#10,256(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,256(<input_0=%rdi)
movdqu %xmm9, 256(%rdi)
# qhasm: mem128[ input_0 + 272 ] = x1
# asm 1: movdqu <x1=reg128#14,272(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,272(<input_0=%rdi)
movdqu %xmm13, 272(%rdi)
# qhasm: mem128[ input_0 + 288 ] = x2
# asm 1: movdqu <x2=reg128#15,288(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,288(<input_0=%rdi)
movdqu %xmm14, 288(%rdi)
# qhasm: mem128[ input_0 + 304 ] = x3
# asm 1: movdqu <x3=reg128#11,304(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,304(<input_0=%rdi)
movdqu %xmm10, 304(%rdi)
# qhasm: mem128[ input_0 + 320 ] = x4
# asm 1: movdqu <x4=reg128#12,320(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,320(<input_0=%rdi)
movdqu %xmm11, 320(%rdi)
# qhasm: mem128[ input_0 + 336 ] = x5
# asm 1: movdqu <x5=reg128#9,336(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,336(<input_0=%rdi)
movdqu %xmm8, 336(%rdi)
# qhasm: mem128[ input_0 + 352 ] = x6
# asm 1: movdqu <x6=reg128#13,352(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,352(<input_0=%rdi)
movdqu %xmm12, 352(%rdi)
# qhasm: mem128[ input_0 + 368 ] = x7
# asm 1: movdqu <x7=reg128#7,368(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,368(<input_0=%rdi)
movdqu %xmm6, 368(%rdi)
# qhasm: x0 = mem128[ input_0 + 384 ]
# asm 1: movdqu 384(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 384(<input_0=%rdi),>x0=%xmm6
movdqu 384(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 400 ]
# asm 1: movdqu 400(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 400(<input_0=%rdi),>x1=%xmm7
movdqu 400(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 416 ]
# asm 1: movdqu 416(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 416(<input_0=%rdi),>x2=%xmm8
movdqu 416(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 432 ]
# asm 1: movdqu 432(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 432(<input_0=%rdi),>x3=%xmm9
movdqu 432(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 448 ]
# asm 1: movdqu 448(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 448(<input_0=%rdi),>x4=%xmm10
movdqu 448(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 464 ]
# asm 1: movdqu 464(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 464(<input_0=%rdi),>x5=%xmm11
movdqu 464(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 480 ]
# asm 1: movdqu 480(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 480(<input_0=%rdi),>x6=%xmm12
movdqu 480(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 496 ]
# asm 1: movdqu 496(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 496(<input_0=%rdi),>x7=%xmm13
movdqu 496(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7
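# Distance-2 swaps done. Final layer: exchange bits between adjacent rows
# (x0,x1), (x2,x3), (x4,x5), (x6,x7) via mask4/mask5 and lane shifts by 1.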
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6
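# All three swap layers applied; store the eight permuted 128-bit rows
# back to input_0 + 384..496.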
# qhasm: mem128[ input_0 + 384 ] = x0
# asm 1: movdqu <x0=reg128#10,384(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,384(<input_0=%rdi)
movdqu %xmm9, 384(%rdi)
# qhasm: mem128[ input_0 + 400 ] = x1
# asm 1: movdqu <x1=reg128#14,400(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,400(<input_0=%rdi)
movdqu %xmm13, 400(%rdi)
# qhasm: mem128[ input_0 + 416 ] = x2
# asm 1: movdqu <x2=reg128#15,416(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,416(<input_0=%rdi)
movdqu %xmm14, 416(%rdi)
# qhasm: mem128[ input_0 + 432 ] = x3
# asm 1: movdqu <x3=reg128#11,432(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,432(<input_0=%rdi)
movdqu %xmm10, 432(%rdi)
# qhasm: mem128[ input_0 + 448 ] = x4
# asm 1: movdqu <x4=reg128#12,448(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,448(<input_0=%rdi)
movdqu %xmm11, 448(%rdi)
# qhasm: mem128[ input_0 + 464 ] = x5
# asm 1: movdqu <x5=reg128#9,464(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,464(<input_0=%rdi)
movdqu %xmm8, 464(%rdi)
# qhasm: mem128[ input_0 + 480 ] = x6
# asm 1: movdqu <x6=reg128#13,480(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,480(<input_0=%rdi)
movdqu %xmm12, 480(%rdi)
# qhasm: mem128[ input_0 + 496 ] = x7
# asm 1: movdqu <x7=reg128#7,496(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,496(<input_0=%rdi)
movdqu %xmm6, 496(%rdi)
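# Same pipeline for the next 128-byte block: load the rows at
# input_0 + 512..624, apply the distance-4/2/1 swap layers, store back.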
# qhasm: x0 = mem128[ input_0 + 512 ]
# asm 1: movdqu 512(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 512(<input_0=%rdi),>x0=%xmm6
movdqu 512(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 528 ]
# asm 1: movdqu 528(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 528(<input_0=%rdi),>x1=%xmm7
movdqu 528(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 544 ]
# asm 1: movdqu 544(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 544(<input_0=%rdi),>x2=%xmm8
movdqu 544(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 560 ]
# asm 1: movdqu 560(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 560(<input_0=%rdi),>x3=%xmm9
movdqu 560(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 576 ]
# asm 1: movdqu 576(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 576(<input_0=%rdi),>x4=%xmm10
movdqu 576(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 592 ]
# asm 1: movdqu 592(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 592(<input_0=%rdi),>x5=%xmm11
movdqu 592(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 608 ]
# asm 1: movdqu 608(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 608(<input_0=%rdi),>x6=%xmm12
movdqu 608(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 624 ]
# asm 1: movdqu 624(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 624(<input_0=%rdi),>x7=%xmm13
movdqu 624(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9
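# Distance-4 (nibble) layer done for this block; distance-2 swaps follow.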
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7
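# Distance-2 layer done; single-bit swaps between adjacent rows follow.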
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6
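# Store this block's permuted rows back to input_0 + 512..624.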
# qhasm: mem128[ input_0 + 512 ] = x0
# asm 1: movdqu <x0=reg128#10,512(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,512(<input_0=%rdi)
movdqu %xmm9, 512(%rdi)
# qhasm: mem128[ input_0 + 528 ] = x1
# asm 1: movdqu <x1=reg128#14,528(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,528(<input_0=%rdi)
movdqu %xmm13, 528(%rdi)
# qhasm: mem128[ input_0 + 544 ] = x2
# asm 1: movdqu <x2=reg128#15,544(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,544(<input_0=%rdi)
movdqu %xmm14, 544(%rdi)
# qhasm: mem128[ input_0 + 560 ] = x3
# asm 1: movdqu <x3=reg128#11,560(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,560(<input_0=%rdi)
movdqu %xmm10, 560(%rdi)
# qhasm: mem128[ input_0 + 576 ] = x4
# asm 1: movdqu <x4=reg128#12,576(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,576(<input_0=%rdi)
movdqu %xmm11, 576(%rdi)
# qhasm: mem128[ input_0 + 592 ] = x5
# asm 1: movdqu <x5=reg128#9,592(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,592(<input_0=%rdi)
movdqu %xmm8, 592(%rdi)
# qhasm: mem128[ input_0 + 608 ] = x6
# asm 1: movdqu <x6=reg128#13,608(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,608(<input_0=%rdi)
movdqu %xmm12, 608(%rdi)
# qhasm: mem128[ input_0 + 624 ] = x7
# asm 1: movdqu <x7=reg128#7,624(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,624(<input_0=%rdi)
movdqu %xmm6, 624(%rdi)
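# Block at input_0 + 640..752: the identical three-layer permutation.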
# qhasm: x0 = mem128[ input_0 + 640 ]
# asm 1: movdqu 640(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 640(<input_0=%rdi),>x0=%xmm6
movdqu 640(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 656 ]
# asm 1: movdqu 656(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 656(<input_0=%rdi),>x1=%xmm7
movdqu 656(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 672 ]
# asm 1: movdqu 672(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 672(<input_0=%rdi),>x2=%xmm8
movdqu 672(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 688 ]
# asm 1: movdqu 688(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 688(<input_0=%rdi),>x3=%xmm9
movdqu 688(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 704 ]
# asm 1: movdqu 704(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 704(<input_0=%rdi),>x4=%xmm10
movdqu 704(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 720 ]
# asm 1: movdqu 720(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 720(<input_0=%rdi),>x5=%xmm11
movdqu 720(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 736 ]
# asm 1: movdqu 736(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 736(<input_0=%rdi),>x6=%xmm12
movdqu 736(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 752 ]
# asm 1: movdqu 752(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 752(<input_0=%rdi),>x7=%xmm13
movdqu 752(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, %xmm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor %xmm8, %xmm12, %xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand %xmm4, %xmm6, %xmm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand %xmm4, %xmm7, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand %xmm5, %xmm6, %xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand %xmm5, %xmm7, %xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, %xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor %xmm7, %xmm6, %xmm6
# qhasm: mem128[ input_0 + 640 ] = x0
# asm 1: movdqu <x0=reg128#10,640(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,640(<input_0=%rdi)
movdqu %xmm9, 640(%rdi)
# qhasm: mem128[ input_0 + 656 ] = x1
# asm 1: movdqu <x1=reg128#14,656(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,656(<input_0=%rdi)
movdqu %xmm13, 656(%rdi)
# qhasm: mem128[ input_0 + 672 ] = x2
# asm 1: movdqu <x2=reg128#15,672(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,672(<input_0=%rdi)
movdqu %xmm14, 672(%rdi)
# qhasm: mem128[ input_0 + 688 ] = x3
# asm 1: movdqu <x3=reg128#11,688(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,688(<input_0=%rdi)
movdqu %xmm10, 688(%rdi)
# qhasm: mem128[ input_0 + 704 ] = x4
# asm 1: movdqu <x4=reg128#12,704(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,704(<input_0=%rdi)
movdqu %xmm11, 704(%rdi)
# qhasm: mem128[ input_0 + 720 ] = x5
# asm 1: movdqu <x5=reg128#9,720(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,720(<input_0=%rdi)
movdqu %xmm8, 720(%rdi)
# qhasm: mem128[ input_0 + 736 ] = x6
# asm 1: movdqu <x6=reg128#13,736(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,736(<input_0=%rdi)
movdqu %xmm12, 736(%rdi)
# qhasm: mem128[ input_0 + 752 ] = x7
# asm 1: movdqu <x7=reg128#7,752(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,752(<input_0=%rdi)
movdqu %xmm6, 752(%rdi)
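# Block at input_0 + 768..880: the identical three-layer permutation.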
# qhasm: x0 = mem128[ input_0 + 768 ]
# asm 1: movdqu 768(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 768(<input_0=%rdi),>x0=%xmm6
movdqu 768(%rdi), %xmm6
# qhasm: x1 = mem128[ input_0 + 784 ]
# asm 1: movdqu 784(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 784(<input_0=%rdi),>x1=%xmm7
movdqu 784(%rdi), %xmm7
# qhasm: x2 = mem128[ input_0 + 800 ]
# asm 1: movdqu 800(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 800(<input_0=%rdi),>x2=%xmm8
movdqu 800(%rdi), %xmm8
# qhasm: x3 = mem128[ input_0 + 816 ]
# asm 1: movdqu 816(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 816(<input_0=%rdi),>x3=%xmm9
movdqu 816(%rdi), %xmm9
# qhasm: x4 = mem128[ input_0 + 832 ]
# asm 1: movdqu 832(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 832(<input_0=%rdi),>x4=%xmm10
movdqu 832(%rdi), %xmm10
# qhasm: x5 = mem128[ input_0 + 848 ]
# asm 1: movdqu 848(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 848(<input_0=%rdi),>x5=%xmm11
movdqu 848(%rdi), %xmm11
# qhasm: x6 = mem128[ input_0 + 864 ]
# asm 1: movdqu 864(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 864(<input_0=%rdi),>x6=%xmm12
movdqu 864(%rdi), %xmm12
# qhasm: x7 = mem128[ input_0 + 880 ]
# asm 1: movdqu 880(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 880(<input_0=%rdi),>x7=%xmm13
movdqu 880(%rdi), %xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand %xmm0, %xmm6, %xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand %xmm0, %xmm10, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand %xmm1, %xmm6, %xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand %xmm1, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, %xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor %xmm10, %xmm6, %xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand %xmm0, %xmm7, %xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand %xmm0, %xmm11, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand %xmm1, %xmm7, %xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand %xmm1, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, %xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor %xmm15, %xmm10, %xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor %xmm11, %xmm7, %xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand %xmm0, %xmm8, %xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand %xmm0, %xmm12, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand %xmm1, %xmm8, %xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand %xmm1, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, %xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor %xmm15, %xmm11, %xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor %xmm12, %xmm8, %xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand %xmm0, %xmm9, %xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm15
vpand %xmm0, %xmm13, %xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, %xmm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand %xmm1, %xmm9, %xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm13
vpand %xmm1, %xmm13, %xmm13
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, %xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x3=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>x7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>x7=%xmm9
vpor %xmm13, %xmm9, %xmm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm13
vpand %xmm2, %xmm14, %xmm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm15
vpand %xmm2, %xmm11, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm14
vpand %xmm3, %xmm14, %xmm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand %xmm3, %xmm11, %xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2, %xmm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>x0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>x0=%xmm13
vpor %xmm15, %xmm13, %xmm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>x2=%xmm11
vpor %xmm11, %xmm14, %xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm14
vpand %xmm2, %xmm10, %xmm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x3=%xmm12,>v10=%xmm15
vpand %xmm2, %xmm12, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand %xmm3, %xmm10, %xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<x3=%xmm12,>v11=%xmm12
vpand %xmm3, %xmm12, %xmm12
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, %xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x1=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>x3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>x3=%xmm10
vpor %xmm12, %xmm10, %xmm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm12
vpand %xmm2, %xmm6, %xmm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm15
vpand %xmm2, %xmm8, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand %xmm3, %xmm6, %xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand %xmm3, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, %xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x4=%xmm12
vpor %xmm15, %xmm12, %xmm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor %xmm8, %xmm6, %xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand %xmm2, %xmm7, %xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<x7=%xmm9,>v10=%xmm15
vpand %xmm2, %xmm9, %xmm15
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2, %xmm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand %xmm3, %xmm7, %xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<x7=%xmm9,>v11=%xmm9
vpand %xmm3, %xmm9, %xmm9
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, %xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>x5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>x5=%xmm8
vpor %xmm15, %xmm8, %xmm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>x7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>x7=%xmm7
vpor %xmm9, %xmm7, %xmm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x0=%xmm13,>v00=%xmm9
vpand %xmm4, %xmm13, %xmm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x1=%xmm14,>v10=%xmm15
vpand %xmm4, %xmm14, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<x0=%xmm13,>v01=%xmm13
vpand %xmm5, %xmm13, %xmm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<x1=%xmm14,>v11=%xmm14
vpand %xmm5, %xmm14, %xmm14
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1, %xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>x0=%xmm9
vpor %xmm15, %xmm9, %xmm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>x1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>x1=%xmm13
vpor %xmm14, %xmm13, %xmm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm14
vpand %xmm4, %xmm11, %xmm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x3=%xmm10,>v10=%xmm15
vpand %xmm4, %xmm10, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand %xmm5, %xmm11, %xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<x3=%xmm10,>v11=%xmm10
vpand %xmm5, %xmm10, %xmm10
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, %xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x2=%xmm14
vpor %xmm15, %xmm14, %xmm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>x3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>x3=%xmm10
vpor %xmm10, %xmm11, %xmm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<x4=%xmm12,>v00=%xmm11
vpand %xmm4, %xmm12, %xmm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x5=%xmm8,>v10=%xmm15
vpand %xmm4, %xmm8, %xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, %xmm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<x4=%xmm12,>v01=%xmm12
vpand %xmm5, %xmm12, %xmm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<x5=%xmm8,>v11=%xmm8
vpand %xmm5, %xmm8, %xmm8
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1, % xmm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x4=%xmm11
vpor % xmm15, % xmm11, % xmm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>x5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>x5=%xmm8
vpor % xmm8, % xmm12, % xmm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm12
vpand % xmm4, % xmm6, % xmm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<x7=%xmm7,>v10=%xmm15
vpand % xmm4, % xmm7, % xmm15
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1, % xmm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<x7=%xmm7,>v11=%xmm7
vpand % xmm5, % xmm7, % xmm7
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>x6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>x6=%xmm12
vpor % xmm15, % xmm12, % xmm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>x7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>x7=%xmm6
vpor % xmm7, % xmm6, % xmm6
# qhasm: mem128[ input_0 + 768 ] = x0
# asm 1: movdqu <x0=reg128#10,768(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm9,768(<input_0=%rdi)
movdqu % xmm9, 768( % rdi)
# qhasm: mem128[ input_0 + 784 ] = x1
# asm 1: movdqu <x1=reg128#14,784(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm13,784(<input_0=%rdi)
movdqu % xmm13, 784( % rdi)
# qhasm: mem128[ input_0 + 800 ] = x2
# asm 1: movdqu <x2=reg128#15,800(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm14,800(<input_0=%rdi)
movdqu % xmm14, 800( % rdi)
# qhasm: mem128[ input_0 + 816 ] = x3
# asm 1: movdqu <x3=reg128#11,816(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm10,816(<input_0=%rdi)
movdqu % xmm10, 816( % rdi)
# qhasm: mem128[ input_0 + 832 ] = x4
# asm 1: movdqu <x4=reg128#12,832(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm11,832(<input_0=%rdi)
movdqu % xmm11, 832( % rdi)
# qhasm: mem128[ input_0 + 848 ] = x5
# asm 1: movdqu <x5=reg128#9,848(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm8,848(<input_0=%rdi)
movdqu % xmm8, 848( % rdi)
# qhasm: mem128[ input_0 + 864 ] = x6
# asm 1: movdqu <x6=reg128#13,864(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm12,864(<input_0=%rdi)
movdqu % xmm12, 864( % rdi)
# qhasm: mem128[ input_0 + 880 ] = x7
# asm 1: movdqu <x7=reg128#7,880(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm6,880(<input_0=%rdi)
movdqu % xmm6, 880( % rdi)
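# The eight rows just stored complete one mask/shift butterfly block
# (the shift-2 and shift-1 levels are visible above); the identical
# network is applied once more below to the final 128-byte chunk at
# offsets 896..1008.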
# qhasm: x0 = mem128[ input_0 + 896 ]
# asm 1: movdqu 896(<input_0=int64#1),>x0=reg128#7
# asm 2: movdqu 896(<input_0=%rdi),>x0=%xmm6
movdqu 896( % rdi), % xmm6
# qhasm: x1 = mem128[ input_0 + 912 ]
# asm 1: movdqu 912(<input_0=int64#1),>x1=reg128#8
# asm 2: movdqu 912(<input_0=%rdi),>x1=%xmm7
movdqu 912( % rdi), % xmm7
# qhasm: x2 = mem128[ input_0 + 928 ]
# asm 1: movdqu 928(<input_0=int64#1),>x2=reg128#9
# asm 2: movdqu 928(<input_0=%rdi),>x2=%xmm8
movdqu 928( % rdi), % xmm8
# qhasm: x3 = mem128[ input_0 + 944 ]
# asm 1: movdqu 944(<input_0=int64#1),>x3=reg128#10
# asm 2: movdqu 944(<input_0=%rdi),>x3=%xmm9
movdqu 944( % rdi), % xmm9
# qhasm: x4 = mem128[ input_0 + 960 ]
# asm 1: movdqu 960(<input_0=int64#1),>x4=reg128#11
# asm 2: movdqu 960(<input_0=%rdi),>x4=%xmm10
movdqu 960( % rdi), % xmm10
# qhasm: x5 = mem128[ input_0 + 976 ]
# asm 1: movdqu 976(<input_0=int64#1),>x5=reg128#12
# asm 2: movdqu 976(<input_0=%rdi),>x5=%xmm11
movdqu 976( % rdi), % xmm11
# qhasm: x6 = mem128[ input_0 + 992 ]
# asm 1: movdqu 992(<input_0=int64#1),>x6=reg128#13
# asm 2: movdqu 992(<input_0=%rdi),>x6=%xmm12
movdqu 992( % rdi), % xmm12
# qhasm: x7 = mem128[ input_0 + 1008 ]
# asm 1: movdqu 1008(<input_0=int64#1),>x7=reg128#14
# asm 2: movdqu 1008(<input_0=%rdi),>x7=%xmm13
movdqu 1008( % rdi), % xmm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <mask0=reg128#1,<x0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<x0=%xmm6,>v00=%xmm14
vpand % xmm0, % xmm6, % xmm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <mask0=reg128#1,<x4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x4=%xmm10,>v10=%xmm15
vpand % xmm0, % xmm10, % xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <mask1=reg128#2,<x0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<x0=%xmm6,>v01=%xmm6
vpand % xmm1, % xmm6, % xmm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <mask1=reg128#2,<x4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<x4=%xmm10,>v11=%xmm10
vpand % xmm1, % xmm10, % xmm10
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4, % xmm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>x0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>x0=%xmm14
vpor % xmm15, % xmm14, % xmm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>x4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>x4=%xmm6
vpor % xmm10, % xmm6, % xmm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <mask0=reg128#1,<x1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<x1=%xmm7,>v00=%xmm10
vpand % xmm0, % xmm7, % xmm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <mask0=reg128#1,<x5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x5=%xmm11,>v10=%xmm15
vpand % xmm0, % xmm11, % xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <mask1=reg128#2,<x1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<x1=%xmm7,>v01=%xmm7
vpand % xmm1, % xmm7, % xmm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <mask1=reg128#2,<x5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<x5=%xmm11,>v11=%xmm11
vpand % xmm1, % xmm11, % xmm11
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4, % xmm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>x1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>x1=%xmm10
vpor % xmm15, % xmm10, % xmm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>x5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>x5=%xmm7
vpor % xmm11, % xmm7, % xmm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <mask0=reg128#1,<x2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<x2=%xmm8,>v00=%xmm11
vpand % xmm0, % xmm8, % xmm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <mask0=reg128#1,<x6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<x6=%xmm12,>v10=%xmm15
vpand % xmm0, % xmm12, % xmm15
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4, % xmm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <mask1=reg128#2,<x2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<x2=%xmm8,>v01=%xmm8
vpand % xmm1, % xmm8, % xmm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <mask1=reg128#2,<x6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<x6=%xmm12,>v11=%xmm12
vpand % xmm1, % xmm12, % xmm12
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4, % xmm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>x2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>x2=%xmm11
vpor % xmm15, % xmm11, % xmm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>x6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>x6=%xmm8
vpor % xmm12, % xmm8, % xmm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <mask0=reg128#1,<x3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<x3=%xmm9,>v00=%xmm12
vpand % xmm0, % xmm9, % xmm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <mask0=reg128#1,<x7=reg128#14,>v10=reg128#1
# asm 2: vpand <mask0=%xmm0,<x7=%xmm13,>v10=%xmm0
vpand % xmm0, % xmm13, % xmm0
# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#1
# asm 2: psllq $4,<v10=%xmm0
psllq $4, % xmm0
# qhasm: v01 = x3 & mask1
# asm 1: vpand <mask1=reg128#2,<x3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<x3=%xmm9,>v01=%xmm9
vpand % xmm1, % xmm9, % xmm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <mask1=reg128#2,<x7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<x7=%xmm13,>v11=%xmm1
vpand % xmm1, % xmm13, % xmm1
# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4, % xmm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v10=reg128#1,<v00=reg128#13,>x3=reg128#1
# asm 2: vpor <v10=%xmm0,<v00=%xmm12,>x3=%xmm0
vpor % xmm0, % xmm12, % xmm0
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>x7=%xmm1
vpor % xmm1, % xmm9, % xmm1
# qhasm: v00 = x0 & mask2
# asm 1: vpand <mask2=reg128#3,<x0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<x0=%xmm14,>v00=%xmm9
vpand % xmm2, % xmm14, % xmm9
# qhasm: v10 = x2 & mask2
# asm 1: vpand <mask2=reg128#3,<x2=reg128#12,>v10=reg128#13
# asm 2: vpand <mask2=%xmm2,<x2=%xmm11,>v10=%xmm12
vpand % xmm2, % xmm11, % xmm12
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#13
# asm 2: psllq $2,<v10=%xmm12
psllq $2, % xmm12
# qhasm: v01 = x0 & mask3
# asm 1: vpand <mask3=reg128#4,<x0=reg128#15,>v01=reg128#14
# asm 2: vpand <mask3=%xmm3,<x0=%xmm14,>v01=%xmm13
vpand % xmm3, % xmm14, % xmm13
# qhasm: v11 = x2 & mask3
# asm 1: vpand <mask3=reg128#4,<x2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<x2=%xmm11,>v11=%xmm11
vpand % xmm3, % xmm11, % xmm11
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#14
# asm 2: psrlq $2,<v01=%xmm13
psrlq $2, % xmm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>x0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>x0=%xmm9
vpor % xmm12, % xmm9, % xmm9
# qhasm: x2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>x2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>x2=%xmm11
vpor % xmm11, % xmm13, % xmm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <mask2=reg128#3,<x1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<x1=%xmm10,>v00=%xmm12
vpand % xmm2, % xmm10, % xmm12
# qhasm: v10 = x3 & mask2
# asm 1: vpand <mask2=reg128#3,<x3=reg128#1,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<x3=%xmm0,>v10=%xmm13
vpand % xmm2, % xmm0, % xmm13
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2, % xmm13
# qhasm: v01 = x1 & mask3
# asm 1: vpand <mask3=reg128#4,<x1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<x1=%xmm10,>v01=%xmm10
vpand % xmm3, % xmm10, % xmm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <mask3=reg128#4,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<x3=%xmm0,>v11=%xmm0
vpand % xmm3, % xmm0, % xmm0
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2, % xmm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>x1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>x1=%xmm12
vpor % xmm13, % xmm12, % xmm12
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>x3=%xmm0
vpor % xmm0, % xmm10, % xmm0
# qhasm: v00 = x4 & mask2
# asm 1: vpand <mask2=reg128#3,<x4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<x4=%xmm6,>v00=%xmm10
vpand % xmm2, % xmm6, % xmm10
# qhasm: v10 = x6 & mask2
# asm 1: vpand <mask2=reg128#3,<x6=reg128#9,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<x6=%xmm8,>v10=%xmm13
vpand % xmm2, % xmm8, % xmm13
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2, % xmm13
# qhasm: v01 = x4 & mask3
# asm 1: vpand <mask3=reg128#4,<x4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<x4=%xmm6,>v01=%xmm6
vpand % xmm3, % xmm6, % xmm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <mask3=reg128#4,<x6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<x6=%xmm8,>v11=%xmm8
vpand % xmm3, % xmm8, % xmm8
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2, % xmm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>x4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>x4=%xmm10
vpor % xmm13, % xmm10, % xmm10
# qhasm: x6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>x6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>x6=%xmm6
vpor % xmm8, % xmm6, % xmm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <mask2=reg128#3,<x5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<x5=%xmm7,>v00=%xmm8
vpand % xmm2, % xmm7, % xmm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <mask2=reg128#3,<x7=reg128#2,>v10=reg128#3
# asm 2: vpand <mask2=%xmm2,<x7=%xmm1,>v10=%xmm2
vpand % xmm2, % xmm1, % xmm2
# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#3
# asm 2: psllq $2,<v10=%xmm2
psllq $2, % xmm2
# qhasm: v01 = x5 & mask3
# asm 1: vpand <mask3=reg128#4,<x5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<x5=%xmm7,>v01=%xmm7
vpand % xmm3, % xmm7, % xmm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <mask3=reg128#4,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<x7=%xmm1,>v11=%xmm1
vpand % xmm3, % xmm1, % xmm1
# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2, % xmm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v10=reg128#3,<v00=reg128#9,>x5=reg128#3
# asm 2: vpor <v10=%xmm2,<v00=%xmm8,>x5=%xmm2
vpor % xmm2, % xmm8, % xmm2
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>x7=%xmm1
vpor % xmm1, % xmm7, % xmm1
# qhasm: v00 = x0 & mask4
# asm 1: vpand <mask4=reg128#5,<x0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<x0=%xmm9,>v00=%xmm3
vpand % xmm4, % xmm9, % xmm3
# qhasm: v10 = x1 & mask4
# asm 1: vpand <mask4=reg128#5,<x1=reg128#13,>v10=reg128#8
# asm 2: vpand <mask4=%xmm4,<x1=%xmm12,>v10=%xmm7
vpand % xmm4, % xmm12, % xmm7
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#8
# asm 2: psllq $1,<v10=%xmm7
psllq $1, % xmm7
# qhasm: v01 = x0 & mask5
# asm 1: vpand <mask5=reg128#6,<x0=reg128#10,>v01=reg128#9
# asm 2: vpand <mask5=%xmm5,<x0=%xmm9,>v01=%xmm8
vpand % xmm5, % xmm9, % xmm8
# qhasm: v11 = x1 & mask5
# asm 1: vpand <mask5=reg128#6,<x1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<x1=%xmm12,>v11=%xmm9
vpand % xmm5, % xmm12, % xmm9
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#9
# asm 2: psrlq $1,<v01=%xmm8
psrlq $1, % xmm8
# qhasm: x0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>x0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>x0=%xmm3
vpor % xmm7, % xmm3, % xmm3
# qhasm: x1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>x1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>x1=%xmm7
vpor % xmm9, % xmm8, % xmm7
# qhasm: v00 = x2 & mask4
# asm 1: vpand <mask4=reg128#5,<x2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<x2=%xmm11,>v00=%xmm8
vpand % xmm4, % xmm11, % xmm8
# qhasm: v10 = x3 & mask4
# asm 1: vpand <mask4=reg128#5,<x3=reg128#1,>v10=reg128#10
# asm 2: vpand <mask4=%xmm4,<x3=%xmm0,>v10=%xmm9
vpand % xmm4, % xmm0, % xmm9
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#10
# asm 2: psllq $1,<v10=%xmm9
psllq $1, % xmm9
# qhasm: v01 = x2 & mask5
# asm 1: vpand <mask5=reg128#6,<x2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<x2=%xmm11,>v01=%xmm11
vpand % xmm5, % xmm11, % xmm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <mask5=reg128#6,<x3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<x3=%xmm0,>v11=%xmm0
vpand % xmm5, % xmm0, % xmm0
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1, % xmm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>x2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>x2=%xmm8
vpor % xmm9, % xmm8, % xmm8
# qhasm: x3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>x3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>x3=%xmm0
vpor % xmm0, % xmm11, % xmm0
# qhasm: v00 = x4 & mask4
# asm 1: vpand <mask4=reg128#5,<x4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<x4=%xmm10,>v00=%xmm9
vpand % xmm4, % xmm10, % xmm9
# qhasm: v10 = x5 & mask4
# asm 1: vpand <mask4=reg128#5,<x5=reg128#3,>v10=reg128#12
# asm 2: vpand <mask4=%xmm4,<x5=%xmm2,>v10=%xmm11
vpand % xmm4, % xmm2, % xmm11
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#12
# asm 2: psllq $1,<v10=%xmm11
psllq $1, % xmm11
# qhasm: v01 = x4 & mask5
# asm 1: vpand <mask5=reg128#6,<x4=reg128#11,>v01=reg128#11
# asm 2: vpand <mask5=%xmm5,<x4=%xmm10,>v01=%xmm10
vpand % xmm5, % xmm10, % xmm10
# qhasm: v11 = x5 & mask5
# asm 1: vpand <mask5=reg128#6,<x5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<x5=%xmm2,>v11=%xmm2
vpand % xmm5, % xmm2, % xmm2
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#11
# asm 2: psrlq $1,<v01=%xmm10
psrlq $1, % xmm10
# qhasm: x4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>x4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>x4=%xmm9
vpor % xmm11, % xmm9, % xmm9
# qhasm: x5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>x5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>x5=%xmm2
vpor % xmm2, % xmm10, % xmm2
# qhasm: v00 = x6 & mask4
# asm 1: vpand <mask4=reg128#5,<x6=reg128#7,>v00=reg128#11
# asm 2: vpand <mask4=%xmm4,<x6=%xmm6,>v00=%xmm10
vpand % xmm4, % xmm6, % xmm10
# qhasm: v10 = x7 & mask4
# asm 1: vpand <mask4=reg128#5,<x7=reg128#2,>v10=reg128#5
# asm 2: vpand <mask4=%xmm4,<x7=%xmm1,>v10=%xmm4
vpand % xmm4, % xmm1, % xmm4
# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#5
# asm 2: psllq $1,<v10=%xmm4
psllq $1, % xmm4
# qhasm: v01 = x6 & mask5
# asm 1: vpand <mask5=reg128#6,<x6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<x6=%xmm6,>v01=%xmm6
vpand % xmm5, % xmm6, % xmm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <mask5=reg128#6,<x7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<x7=%xmm1,>v11=%xmm1
vpand % xmm5, % xmm1, % xmm1
# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1, % xmm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v10=reg128#5,<v00=reg128#11,>x6=reg128#5
# asm 2: vpor <v10=%xmm4,<v00=%xmm10,>x6=%xmm4
vpor % xmm4, % xmm10, % xmm4
# qhasm: x7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>x7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>x7=%xmm1
vpor % xmm1, % xmm6, % xmm1
# qhasm: mem128[ input_0 + 896 ] = x0
# asm 1: movdqu <x0=reg128#4,896(<input_0=int64#1)
# asm 2: movdqu <x0=%xmm3,896(<input_0=%rdi)
movdqu % xmm3, 896( % rdi)
# qhasm: mem128[ input_0 + 912 ] = x1
# asm 1: movdqu <x1=reg128#8,912(<input_0=int64#1)
# asm 2: movdqu <x1=%xmm7,912(<input_0=%rdi)
movdqu % xmm7, 912( % rdi)
# qhasm: mem128[ input_0 + 928 ] = x2
# asm 1: movdqu <x2=reg128#9,928(<input_0=int64#1)
# asm 2: movdqu <x2=%xmm8,928(<input_0=%rdi)
movdqu % xmm8, 928( % rdi)
# qhasm: mem128[ input_0 + 944 ] = x3
# asm 1: movdqu <x3=reg128#1,944(<input_0=int64#1)
# asm 2: movdqu <x3=%xmm0,944(<input_0=%rdi)
movdqu % xmm0, 944( % rdi)
# qhasm: mem128[ input_0 + 960 ] = x4
# asm 1: movdqu <x4=reg128#10,960(<input_0=int64#1)
# asm 2: movdqu <x4=%xmm9,960(<input_0=%rdi)
movdqu % xmm9, 960( % rdi)
# qhasm: mem128[ input_0 + 976 ] = x5
# asm 1: movdqu <x5=reg128#3,976(<input_0=int64#1)
# asm 2: movdqu <x5=%xmm2,976(<input_0=%rdi)
movdqu % xmm2, 976( % rdi)
# qhasm: mem128[ input_0 + 992 ] = x6
# asm 1: movdqu <x6=reg128#5,992(<input_0=int64#1)
# asm 2: movdqu <x6=%xmm4,992(<input_0=%rdi)
movdqu % xmm4, 992( % rdi)
# qhasm: mem128[ input_0 + 1008 ] = x7
# asm 1: movdqu <x7=reg128#2,1008(<input_0=int64#1)
# asm 2: movdqu <x7=%xmm1,1008(<input_0=%rdi)
movdqu % xmm1, 1008( % rdi)
# qhasm: return
add % r11, % rsp
ret
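# Note on the routine above: the vpand/psllq/vpor (and vpand/psrlq/
# vpor) triples implement a bit-matrix transpose butterfly.  At each
# level, a complementary mask pair (mask4/mask5 at shift 1, mask2/mask3
# at shift 2, mask0/mask1 at shift 4) selects which bits are exchanged
# between a register pair.  A minimal C sketch of one such step on
# 64-bit lanes -- the names and the concrete mask are illustrative,
# not taken from this file:
#
#   #include <stdint.h>
#
#   /* One butterfly level: combine the mask-selected halves of a and
#      b, as the vpand/psllq/vpor groups above do per 64-bit lane.
#      E.g. shift = 1 with mask = 0x5555555555555555. */
#   static void butterfly_step(uint64_t *a, uint64_t *b,
#                              uint64_t mask, int shift) {
#       uint64_t lo = (*a & mask) | ((*b & mask) << shift);
#       uint64_t hi = ((*a & ~mask) >> shift) | (*b & ~mask);
#       *a = lo;
#       *b = hi;
#   }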

/* .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/vec256_mul_asm.S */

#include "namespace.h"
#define vec256_mul_asm CRYPTO_NAMESPACE(vec256_mul_asm)
#define _vec256_mul_asm _CRYPTO_NAMESPACE(vec256_mul_asm)
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg256 a0
# qhasm: reg256 a1
# qhasm: reg256 a2
# qhasm: reg256 a3
# qhasm: reg256 a4
# qhasm: reg256 a5
# qhasm: reg256 a6
# qhasm: reg256 a7
# qhasm: reg256 a8
# qhasm: reg256 a9
# qhasm: reg256 a10
# qhasm: reg256 a11
# qhasm: reg256 a12
# qhasm: reg256 b0
# qhasm: reg256 b1
# qhasm: reg256 r0
# qhasm: reg256 r1
# qhasm: reg256 r2
# qhasm: reg256 r3
# qhasm: reg256 r4
# qhasm: reg256 r5
# qhasm: reg256 r6
# qhasm: reg256 r7
# qhasm: reg256 r8
# qhasm: reg256 r9
# qhasm: reg256 r10
# qhasm: reg256 r11
# qhasm: reg256 r12
# qhasm: reg256 r13
# qhasm: reg256 r14
# qhasm: reg256 r15
# qhasm: reg256 r16
# qhasm: reg256 r17
# qhasm: reg256 r18
# qhasm: reg256 r19
# qhasm: reg256 r20
# qhasm: reg256 r21
# qhasm: reg256 r22
# qhasm: reg256 r23
# qhasm: reg256 r24
# qhasm: reg256 r
# qhasm: enter vec256_mul_asm
.p2align 5
.global _vec256_mul_asm
.global vec256_mul_asm
_vec256_mul_asm:
vec256_mul_asm:
mov % rsp, % r11
and $31, % r11
add $0, % r11
sub % r11, % rsp
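# Overview of the body below: this is a bitsliced carry-less multiply.
# Each of the 13 limbs a0..a12 of the first operand is ANDed with each
# of the 13 limbs of the second operand (b0 plus mem256[input_2 + 32],
# ..., mem256[input_2 + 384]), and the partial products are
# XOR-accumulated into r0..r24 -- schoolbook multiplication in
# GF(2)[x], 256 bit positions at a time.  Completed high limbs are
# folded back on the fly using x^13 = x^4 + x^3 + x + 1, the GF(2^13)
# reduction polynomial of mceliece8192128, which is why each r(13+k)
# is XORed into r(k+4), r(k+3), r(k+1) and r(k).  A reference C sketch
# under those assumptions (scalar 64-bit lanes instead of ymm; the
# function name is illustrative):
#
#   #include <stdint.h>
#
#   static void vec_mul_sketch(uint64_t h[13],
#                              const uint64_t f[13],
#                              const uint64_t g[13]) {
#       uint64_t buf[25] = {0};
#       for (int i = 0; i < 13; i++)         /* schoolbook AND/XOR */
#           for (int j = 0; j < 13; j++)
#               buf[i + j] ^= f[i] & g[j];
#       for (int i = 24; i >= 13; i--) {     /* x^13 = x^4+x^3+x+1 */
#           buf[i - 13 + 4] ^= buf[i];
#           buf[i - 13 + 3] ^= buf[i];
#           buf[i - 13 + 1] ^= buf[i];
#           buf[i - 13 + 0] ^= buf[i];
#       }
#       for (int i = 0; i < 13; i++) h[i] = buf[i];
#   }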
# qhasm: b0 = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1
# asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0
vmovupd 0( % rdx), % ymm0
# qhasm: a12 = mem256[ input_1 + 384 ]
# asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2
# asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1
vmovupd 384( % rsi), % ymm1
# qhasm: r12 = a12 & b0
# asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3
# asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2
vpand % ymm1, % ymm0, % ymm2
# qhasm: r13 = a12 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4
# asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3
vpand 32( % rdx), % ymm1, % ymm3
# qhasm: r14 = a12 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5
# asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4
vpand 64( % rdx), % ymm1, % ymm4
# qhasm: r15 = a12 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6
# asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5
vpand 96( % rdx), % ymm1, % ymm5
# qhasm: r16 = a12 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7
# asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6
vpand 128( % rdx), % ymm1, % ymm6
# qhasm: r17 = a12 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8
# asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7
vpand 160( % rdx), % ymm1, % ymm7
# qhasm: r18 = a12 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9
# asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8
vpand 192( % rdx), % ymm1, % ymm8
# qhasm: r19 = a12 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10
# asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9
vpand 224( % rdx), % ymm1, % ymm9
# qhasm: r20 = a12 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11
# asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10
vpand 256( % rdx), % ymm1, % ymm10
# qhasm: r21 = a12 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12
# asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11
vpand 288( % rdx), % ymm1, % ymm11
# qhasm: r22 = a12 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13
# asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12
vpand 320( % rdx), % ymm1, % ymm12
# qhasm: r23 = a12 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13
vpand 352( % rdx), % ymm1, % ymm13
# qhasm: r24 = a12 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2
# asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1
vpand 384( % rdx), % ymm1, % ymm1
# qhasm: r15 ^= r24
# asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5
vpxor % ymm1, % ymm5, % ymm5
# qhasm: r14 ^= r24
# asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4
vpxor % ymm1, % ymm4, % ymm4
# qhasm: r12 ^= r24
# asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2
vpxor % ymm1, % ymm2, % ymm2
# qhasm: r11 = r24
# asm 1: vmovapd <r24=reg256#2,>r11=reg256#2
# asm 2: vmovapd <r24=%ymm1,>r11=%ymm1
vmovapd % ymm1, % ymm1
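# The four vpxor instructions above reduce the top limb: r24 holds the
# coefficient of x^24 = x^11 * x^13, and x^13 = x^4 + x^3 + x + 1, so
# r24 folds into r15, r14, r12 and r11.  The vmovapd is a no-op at the
# machine level; it only renames r24 to r11 in qhasm's register
# allocation.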
# qhasm: a11 = mem256[ input_1 + 352 ]
# asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15
# asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14
vmovupd 352( % rsi), % ymm14
# qhasm: r = a11 & b0
# asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a11 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a11 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a11 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a11 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a11 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a11 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a11 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a11 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a11 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a11 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a11 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a11 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r23 ^= r
# asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14
# asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13
vpxor % ymm14, % ymm13, % ymm13
# qhasm: r14 ^= r23
# asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4
vpxor % ymm13, % ymm4, % ymm4
# qhasm: r13 ^= r23
# asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3
vpxor % ymm13, % ymm3, % ymm3
# qhasm: r11 ^= r23
# asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1
vpxor % ymm13, % ymm1, % ymm1
# qhasm: r10 = r23
# asm 1: vmovapd <r23=reg256#14,>r10=reg256#14
# asm 2: vmovapd <r23=%ymm13,>r10=%ymm13
vmovapd % ymm13, % ymm13
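# Each subsequent column repeats the same pattern one output index
# lower: a11's products land in r11..r23, and the completed top limb
# r23 is folded immediately (into r14, r13, r11 and r10), keeping the
# working set within the 16 available ymm registers.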
# qhasm: a10 = mem256[ input_1 + 320 ]
# asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15
# asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14
vmovupd 320( % rsi), % ymm14
# qhasm: r = a10 & b0
# asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a10 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a10 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a10 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a10 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a10 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a10 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a10 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a10 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a10 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a10 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a10 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a10 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12
vpxor % ymm14, % ymm12, % ymm12
# qhasm: r13 ^= r22
# asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3
vpxor % ymm12, % ymm3, % ymm3
# qhasm: r12 ^= r22
# asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2
vpxor % ymm12, % ymm2, % ymm2
# qhasm: r10 ^= r22
# asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13
vpxor % ymm12, % ymm13, % ymm13
# qhasm: r9 = r22
# asm 1: vmovapd <r22=reg256#13,>r9=reg256#13
# asm 2: vmovapd <r22=%ymm12,>r9=%ymm12
vmovapd % ymm12, % ymm12
# qhasm: a9 = mem256[ input_1 + 288 ]
# asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15
# asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14
vmovupd 288( % rsi), % ymm14
# qhasm: r = a9 & b0
# asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a9 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a9 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a9 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a9 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a9 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a9 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a9 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a9 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a9 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a9 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a9 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a9 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11
vpxor % ymm14, % ymm11, % ymm11
# qhasm: r12 ^= r21
# asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2
vpxor % ymm11, % ymm2, % ymm2
# qhasm: r11 ^= r21
# asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1
vpxor % ymm11, % ymm1, % ymm1
# qhasm: r9 ^= r21
# asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12
vpxor % ymm11, % ymm12, % ymm12
# qhasm: r8 = r21
# asm 1: vmovapd <r21=reg256#12,>r8=reg256#12
# asm 2: vmovapd <r21=%ymm11,>r8=%ymm11
vmovapd % ymm11, % ymm11
# qhasm: a8 = mem256[ input_1 + 256 ]
# asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15
# asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14
vmovupd 256( % rsi), % ymm14
# qhasm: r = a8 & b0
# asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a8 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a8 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a8 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a8 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a8 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a8 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a8 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a8 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a8 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a8 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a8 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a8 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10
vpxor % ymm14, % ymm10, % ymm10
# qhasm: r11 ^= r20
# asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1
vpxor % ymm10, % ymm1, % ymm1
# qhasm: r10 ^= r20
# asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13
vpxor % ymm10, % ymm13, % ymm13
# qhasm: r8 ^= r20
# asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11
vpxor % ymm10, % ymm11, % ymm11
# qhasm: r7 = r20
# asm 1: vmovapd <r20=reg256#11,>r7=reg256#11
# asm 2: vmovapd <r20=%ymm10,>r7=%ymm10
vmovapd % ymm10, % ymm10
# qhasm: a7 = mem256[ input_1 + 224 ]
# asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15
# asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14
vmovupd 224( % rsi), % ymm14
# qhasm: r = a7 & b0
# asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a7 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a7 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a7 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a7 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a7 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a7 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a7 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a7 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a7 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a7 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a7 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a7 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9
vpxor % ymm14, % ymm9, % ymm9
# qhasm: r10 ^= r19
# asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13
vpxor % ymm9, % ymm13, % ymm13
# qhasm: r9 ^= r19
# asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12
vpxor % ymm9, % ymm12, % ymm12
# qhasm: r7 ^= r19
# asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10
vpxor % ymm9, % ymm10, % ymm10
# qhasm: r6 = r19
# asm 1: vmovapd <r19=reg256#10,>r6=reg256#10
# asm 2: vmovapd <r19=%ymm9,>r6=%ymm9
vmovapd % ymm9, % ymm9
# qhasm: a6 = mem256[ input_1 + 192 ]
# asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15
# asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14
vmovupd 192( % rsi), % ymm14
# qhasm: r = a6 & b0
# asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a6 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a6 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a6 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a6 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a6 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a6 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a6 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a6 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a6 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a6 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a6 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a6 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8
vpxor % ymm14, % ymm8, % ymm8
# qhasm: r9 ^= r18
# asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12
vpxor % ymm8, % ymm12, % ymm12
# qhasm: r8 ^= r18
# asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11
vpxor % ymm8, % ymm11, % ymm11
# qhasm: r6 ^= r18
# asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9
vpxor % ymm8, % ymm9, % ymm9
# qhasm: r5 = r18
# asm 1: vmovapd <r18=reg256#9,>r5=reg256#9
# asm 2: vmovapd <r18=%ymm8,>r5=%ymm8
vmovapd % ymm8, % ymm8
# qhasm: a5 = mem256[ input_1 + 160 ]
# asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15
# asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14
vmovupd 160( % rsi), % ymm14
# qhasm: r = a5 & b0
# asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a5 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a5 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a5 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a5 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a5 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a5 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a5 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a5 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a5 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a5 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a5 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a5 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7
vpxor % ymm14, % ymm7, % ymm7
# qhasm: r8 ^= r17
# asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11
vpxor %ymm7, %ymm11, %ymm11
# qhasm: r7 ^= r17
# asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10
vpxor %ymm7, %ymm10, %ymm10
# qhasm: r5 ^= r17
# asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8
vpxor %ymm7, %ymm8, %ymm8
# qhasm: r4 = r17
# asm 1: vmovapd <r17=reg256#8,>r4=reg256#8
# asm 2: vmovapd <r17=%ymm7,>r4=%ymm7
vmovapd %ymm7, %ymm7
# qhasm: a4 = mem256[ input_1 + 128 ]
# asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15
# asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14
vmovupd 128( % rsi), % ymm14
# qhasm: r = a4 & b0
# asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a4 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a4 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a4 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a4 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a4 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a4 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a4 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a4 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a4 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a4 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a4 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a4 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6
vpxor % ymm14, % ymm6, % ymm6
# qhasm: r7 ^= r16
# asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10
vpxor %ymm6, %ymm10, %ymm10
# qhasm: r6 ^= r16
# asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9
vpxor %ymm6, %ymm9, %ymm9
# qhasm: r4 ^= r16
# asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7
vpxor %ymm6, %ymm7, %ymm7
# qhasm: r3 = r16
# asm 1: vmovapd <r16=reg256#7,>r3=reg256#7
# asm 2: vmovapd <r16=%ymm6,>r3=%ymm6
vmovapd %ymm6, %ymm6
# qhasm: a3 = mem256[ input_1 + 96 ]
# asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15
# asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14
vmovupd 96( % rsi), % ymm14
# qhasm: r = a3 & b0
# asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a3 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a3 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a3 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a3 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a3 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a3 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a3 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a3 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a3 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a3 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a3 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a3 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5
vpxor % ymm14, % ymm5, % ymm5
# qhasm: r6 ^= r15
# asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9
vpxor %ymm5, %ymm9, %ymm9
# qhasm: r5 ^= r15
# asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8
vpxor %ymm5, %ymm8, %ymm8
# qhasm: r3 ^= r15
# asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6
vpxor %ymm5, %ymm6, %ymm6
# qhasm: r2 = r15
# asm 1: vmovapd <r15=reg256#6,>r2=reg256#6
# asm 2: vmovapd <r15=%ymm5,>r2=%ymm5
vmovapd %ymm5, %ymm5
# qhasm: a2 = mem256[ input_1 + 64 ]
# asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15
# asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14
vmovupd 64( % rsi), % ymm14
# qhasm: r = a2 & b0
# asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a2 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a2 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a2 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a2 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a2 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a2 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a2 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a2 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a2 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a2 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a2 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor % ymm15, % ymm3, % ymm3
# qhasm: r = a2 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4
vpxor % ymm14, % ymm4, % ymm4
# qhasm: r5 ^= r14
# asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8
vpxor %ymm4, %ymm8, %ymm8
# qhasm: r4 ^= r14
# asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7
vpxor %ymm4, %ymm7, %ymm7
# qhasm: r2 ^= r14
# asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5
vpxor %ymm4, %ymm5, %ymm5
# qhasm: r1 = r14
# asm 1: vmovapd <r14=reg256#5,>r1=reg256#5
# asm 2: vmovapd <r14=%ymm4,>r1=%ymm4
vmovapd %ymm4, %ymm4
# qhasm: a1 = mem256[ input_1 + 32 ]
# asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15
# asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14
vmovupd 32( % rsi), % ymm14
# qhasm: r = a1 & b0
# asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15
vpand % ymm14, % ymm0, % ymm15
# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4
vpxor % ymm15, % ymm4, % ymm4
# qhasm: r = a1 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 32( % rdx), % ymm14, % ymm15
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor % ymm15, % ymm5, % ymm5
# qhasm: r = a1 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 64( % rdx), % ymm14, % ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor % ymm15, % ymm6, % ymm6
# qhasm: r = a1 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 96( % rdx), % ymm14, % ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor % ymm15, % ymm7, % ymm7
# qhasm: r = a1 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 128( % rdx), % ymm14, % ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor % ymm15, % ymm8, % ymm8
# qhasm: r = a1 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 160( % rdx), % ymm14, % ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor % ymm15, % ymm9, % ymm9
# qhasm: r = a1 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 192( % rdx), % ymm14, % ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor % ymm15, % ymm10, % ymm10
# qhasm: r = a1 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 224( % rdx), % ymm14, % ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor % ymm15, % ymm11, % ymm11
# qhasm: r = a1 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 256( % rdx), % ymm14, % ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor % ymm15, % ymm12, % ymm12
# qhasm: r = a1 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 288( % rdx), % ymm14, % ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor % ymm15, % ymm13, % ymm13
# qhasm: r = a1 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 320( % rdx), % ymm14, % ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor % ymm15, % ymm1, % ymm1
# qhasm: r = a1 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 352( % rdx), % ymm14, % ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor % ymm15, % ymm2, % ymm2
# qhasm: r = a1 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14
vpand 384( % rdx), % ymm14, % ymm14
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3
vpxor % ymm14, % ymm3, % ymm3
# qhasm: r4 ^= r13
# asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7
vpxor %ymm3, %ymm7, %ymm7
# qhasm: r3 ^= r13
# asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6
vpxor %ymm3, %ymm6, %ymm6
# qhasm: r1 ^= r13
# asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4
vpxor %ymm3, %ymm4, %ymm4
# qhasm: r0 = r13
# asm 1: vmovapd <r13=reg256#4,>r0=reg256#4
# asm 2: vmovapd <r13=%ymm3,>r0=%ymm3
vmovapd %ymm3, %ymm3
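# NOTE: final reduction fold.  r13 is the last limb of degree 13 or higher,
# and x^13 = x^4 + x^3 + x + 1 sends it into r4, r3, r1 and r0 (r0 is
# initialized here rather than XORed, since nothing has touched it yet).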
# qhasm: a0 = mem256[ input_1 + 0 ]
# asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15
# asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14
vmovupd 0(%rsi), %ymm14
# qhasm: r = a0 & b0
# asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1
# asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0
vpand %ymm14, %ymm0, %ymm0
# qhasm: r0 ^= r
# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor %ymm0, %ymm3, %ymm3
# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 32(%rdx), %ymm14, %ymm0
# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor %ymm0, %ymm4, %ymm4
# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 64(%rdx), %ymm14, %ymm0
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor %ymm0, %ymm5, %ymm5
# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 96(%rdx), %ymm14, %ymm0
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor %ymm0, %ymm6, %ymm6
# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 128(%rdx), %ymm14, %ymm0
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor %ymm0, %ymm7, %ymm7
# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 160(%rdx), %ymm14, %ymm0
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor %ymm0, %ymm8, %ymm8
# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 192(%rdx), %ymm14, %ymm0
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor %ymm0, %ymm9, %ymm9
# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 224(%rdx), %ymm14, %ymm0
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor %ymm0, %ymm10, %ymm10
# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 256(%rdx), %ymm14, %ymm0
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor %ymm0, %ymm11, %ymm11
# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 288(%rdx), %ymm14, %ymm0
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor %ymm0, %ymm12, %ymm12
# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 320(%rdx), %ymm14, %ymm0
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
vpxor %ymm0, %ymm13, %ymm13
# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352(%rdx), %ymm14, %ymm0
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor %ymm0, %ymm1, %ymm1
# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384(%rdx), %ymm14, %ymm0
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor %ymm0, %ymm2, %ymm2
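# NOTE: the a0 row needs no fold of its own; its highest term a0*b12 lands
# in r12.  The thirteen reduced limbs r0 .. r12 are now complete and are
# written back below, highest offset first.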
# qhasm: mem256[ input_0 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1)
# asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi)
vmovupd %ymm2, 384(%rdi)
# qhasm: mem256[ input_0 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1)
# asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi)
vmovupd %ymm1, 352(%rdi)
# qhasm: mem256[ input_0 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1)
# asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi)
vmovupd %ymm13, 320(%rdi)
# qhasm: mem256[ input_0 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#13,288(<input_0=int64#1)
# asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi)
vmovupd %ymm12, 288(%rdi)
# qhasm: mem256[ input_0 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1)
# asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi)
vmovupd %ymm11, 256(%rdi)
# qhasm: mem256[ input_0 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1)
# asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi)
vmovupd %ymm10, 224(%rdi)
# qhasm: mem256[ input_0 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1)
# asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi)
vmovupd %ymm9, 192(%rdi)
# qhasm: mem256[ input_0 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi)
vmovupd %ymm8, 160(%rdi)
# qhasm: mem256[ input_0 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1)
# asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi)
vmovupd %ymm7, 128(%rdi)
# qhasm: mem256[ input_0 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1)
# asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi)
vmovupd %ymm6, 96(%rdi)
# qhasm: mem256[ input_0 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1)
# asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi)
vmovupd %ymm5, 64(%rdi)
# qhasm: mem256[ input_0 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1)
# asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi)
vmovupd %ymm4, 32(%rdi)
# qhasm: mem256[ input_0 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1)
# asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi)
vmovupd %ymm3, 0(%rdi)
# qhasm: return
add %r11, %rsp
ret
# --- next file: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/transpose_64x256_sp_asm.S ---
#include "namespace.h"
#define MASK0_0 CRYPTO_NAMESPACE(MASK0_0)
#define _MASK0_0 _CRYPTO_NAMESPACE(MASK0_0)
#define MASK0_1 CRYPTO_NAMESPACE(MASK0_1)
#define _MASK0_1 _CRYPTO_NAMESPACE(MASK0_1)
#define MASK1_0 CRYPTO_NAMESPACE(MASK1_0)
#define _MASK1_0 _CRYPTO_NAMESPACE(MASK1_0)
#define MASK1_1 CRYPTO_NAMESPACE(MASK1_1)
#define _MASK1_1 _CRYPTO_NAMESPACE(MASK1_1)
#define MASK2_0 CRYPTO_NAMESPACE(MASK2_0)
#define _MASK2_0 _CRYPTO_NAMESPACE(MASK2_0)
#define MASK2_1 CRYPTO_NAMESPACE(MASK2_1)
#define _MASK2_1 _CRYPTO_NAMESPACE(MASK2_1)
#define MASK3_0 CRYPTO_NAMESPACE(MASK3_0)
#define _MASK3_0 _CRYPTO_NAMESPACE(MASK3_0)
#define MASK3_1 CRYPTO_NAMESPACE(MASK3_1)
#define _MASK3_1 _CRYPTO_NAMESPACE(MASK3_1)
#define MASK4_0 CRYPTO_NAMESPACE(MASK4_0)
#define _MASK4_0 _CRYPTO_NAMESPACE(MASK4_0)
#define MASK4_1 CRYPTO_NAMESPACE(MASK4_1)
#define _MASK4_1 _CRYPTO_NAMESPACE(MASK4_1)
#define MASK5_0 CRYPTO_NAMESPACE(MASK5_0)
#define _MASK5_0 _CRYPTO_NAMESPACE(MASK5_0)
#define MASK5_1 CRYPTO_NAMESPACE(MASK5_1)
#define _MASK5_1 _CRYPTO_NAMESPACE(MASK5_1)
#define transpose_64x256_sp_asm CRYPTO_NAMESPACE(transpose_64x256_sp_asm)
#define _transpose_64x256_sp_asm _CRYPTO_NAMESPACE(transpose_64x256_sp_asm)
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg256 x0
# qhasm: reg256 x1
# qhasm: reg256 x2
# qhasm: reg256 x3
# qhasm: reg256 x4
# qhasm: reg256 x5
# qhasm: reg256 x6
# qhasm: reg256 x7
# qhasm: reg256 t0
# qhasm: reg256 t1
# qhasm: reg256 v00
# qhasm: reg256 v01
# qhasm: reg256 v10
# qhasm: reg256 v11
# qhasm: reg256 mask0
# qhasm: reg256 mask1
# qhasm: reg256 mask2
# qhasm: reg256 mask3
# qhasm: reg256 mask4
# qhasm: reg256 mask5
# qhasm: enter transpose_64x256_sp_asm
.p2align 5
.global _transpose_64x256_sp_asm
.global transpose_64x256_sp_asm
_transpose_64x256_sp_asm:
transpose_64x256_sp_asm:
mov %rsp, %r11
and $31, %r11
add $0, %r11
sub %r11, %rsp
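# NOTE: standard qhasm prologue.  r11 = rsp mod 32 is subtracted so the
# stack becomes 32-byte aligned; the `add $0` is the slot where qhasm would
# reserve stack space (none is needed here).  A matching `add %r11, %rsp`
# before `ret` restores the caller's stack pointer, as in the routine above.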
# qhasm: mask0 aligned= mem256[ MASK5_0 ]
# asm 1: vmovapd MASK5_0(%rip),>mask0=reg256#1
# asm 2: vmovapd MASK5_0(%rip),>mask0=%ymm0
vmovapd MASK5_0(%rip), %ymm0
# qhasm: mask1 aligned= mem256[ MASK5_1 ]
# asm 1: vmovapd MASK5_1(%rip),>mask1=reg256#2
# asm 2: vmovapd MASK5_1(%rip),>mask1=%ymm1
vmovapd MASK5_1(%rip), %ymm1
# qhasm: mask2 aligned= mem256[ MASK4_0 ]
# asm 1: vmovapd MASK4_0(%rip),>mask2=reg256#3
# asm 2: vmovapd MASK4_0(%rip),>mask2=%ymm2
vmovapd MASK4_0(%rip), %ymm2
# qhasm: mask3 aligned= mem256[ MASK4_1 ]
# asm 1: vmovapd MASK4_1(%rip),>mask3=reg256#4
# asm 2: vmovapd MASK4_1(%rip),>mask3=%ymm3
vmovapd MASK4_1(%rip), %ymm3
# qhasm: mask4 aligned= mem256[ MASK3_0 ]
# asm 1: vmovapd MASK3_0(%rip),>mask4=reg256#5
# asm 2: vmovapd MASK3_0(%rip),>mask4=%ymm4
vmovapd MASK3_0(%rip), %ymm4
# qhasm: mask5 aligned= mem256[ MASK3_1 ]
# asm 1: vmovapd MASK3_1(%rip),>mask5=reg256#6
# asm 2: vmovapd MASK3_1(%rip),>mask5=%ymm5
vmovapd MASK3_1(%rip), %ymm5
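# NOTE: judging from how they are paired with the shifts below, MASKk_0
# selects the low half and MASKk_1 the high half of every 2^(k+1)-bit block
# (e.g. MASK5_0 would be 0x00000000ffffffff in each 64-bit lane).  Only the
# 32-, 16- and 8-bit swap stages appear in this part of the routine.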
# qhasm: x0 = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6
vmovupd 0(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 256 ]
# asm 1: vmovupd 256(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 256(<input_0=%rdi),>x1=%ymm7
vmovupd 256(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 512 ]
# asm 1: vmovupd 512(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 512(<input_0=%rdi),>x2=%ymm8
vmovupd 512(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 768 ]
# asm 1: vmovupd 768(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 768(<input_0=%rdi),>x3=%ymm9
vmovupd 768(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1024 ]
# asm 1: vmovupd 1024(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1024(<input_0=%rdi),>x4=%ymm10
vmovupd 1024(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1280 ]
# asm 1: vmovupd 1280(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1280(<input_0=%rdi),>x5=%ymm11
vmovupd 1280(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1536 ]
# asm 1: vmovupd 1536(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1536(<input_0=%rdi),>x6=%ymm12
vmovupd 1536(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1792 ]
# asm 1: vmovupd 1792(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1792(<input_0=%rdi),>x7=%ymm13
vmovupd 1792(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
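# NOTE: one transpose butterfly.  Writing lo32/hi32 for the two halves of a
# 64-bit lane, the four vpand/shift ops above combine into
#   new x0 = (x0 & lo32) | (x4 << 32)
#   new x4 = (x0 >> 32)  | (x4 & hi32)
# i.e. the high 32-bit halves of x0 are exchanged with the low halves of x4.
# The same exchange is applied next to (x1,x5), (x2,x6) and (x3,x7).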
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand % ymm7, % ymm0, % ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, % ymm11, % ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, % ymm7, % ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand % ymm11, % ymm1, % ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor % ymm10, % ymm15, % ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor % ymm7, % ymm11, % ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand % ymm8, % ymm0, % ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, % ymm12, % ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, % ymm8, % ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand % ymm12, % ymm1, % ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor % ymm11, % ymm15, % ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor % ymm8, % ymm12, % ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand % ymm9, % ymm0, % ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32, % ymm13, % ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, % ymm9, % ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand % ymm13, % ymm1, % ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor % ymm12, % ymm15, % ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor % ymm9, % ymm13, % ymm9
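# NOTE: the 32-bit swap stage is now complete for all four register pairs.
# The two stages below repeat it at finer granularity: 16-bit halves via
# mask2/mask3 with vpslld/vpsrld $16, then 8-bit halves via mask4/mask5
# with vpsllw/vpsrlw $8.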
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand % ymm14, % ymm2, % ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16, % ymm11, % ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16, % ymm14, % ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand % ymm11, % ymm3, % ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor % ymm13, % ymm15, % ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor % ymm14, % ymm11, % ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand % ymm10, % ymm2, % ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16, % ymm12, % ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, % ymm10, % ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand % ymm12, % ymm3, % ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor % ymm14, % ymm15, % ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor % ymm10, % ymm12, % ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand % ymm6, % ymm2, % ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16, % ymm8, % ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, % ymm6, % ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand % ymm8, % ymm3, % ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor % ymm12, % ymm15, % ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor % ymm6, % ymm8, % ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand % ymm7, % ymm2, % ymm8
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16, % ymm9, % ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, % ymm7, % ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand % ymm9, % ymm3, % ymm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor % ymm8, % ymm15, % ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor % ymm7, % ymm9, % ymm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand % ymm13, % ymm4, % ymm9
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8, % ymm14, % ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8, % ymm13, % ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand % ymm14, % ymm5, % ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor % ymm9, % ymm15, % ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor % ymm13, % ymm14, % ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand % ymm11, % ymm4, % ymm14
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8, % ymm10, % ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, % ymm11, % ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand % ymm10, % ymm5, % ymm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor % ymm14, % ymm15, % ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor % ymm11, % ymm10, % ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand % ymm12, % ymm4, % ymm11
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8, % ymm8, % ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8, % ymm12, % ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand % ymm8, % ymm5, % ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor % ymm11, % ymm15, % ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor % ymm12, % ymm8, % ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand % ymm6, % ymm4, % ymm12
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, % ymm7, % ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, % ymm6, % ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand % ymm7, % ymm5, % ymm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor % ymm12, % ymm15, % ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor % ymm6, % ymm7, % ymm6
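# NOTE: end of the 8-bit stage for this slice.  The MASK2..MASK0 constants
# declared at the top of the file suggest that matching 4-, 2- and 1-bit
# stages appear further down in the routine.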
# qhasm: mem256[ input_0 + 0 ] = x0
# asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi)
vmovupd %ymm9, 0(%rdi)
# qhasm: mem256[ input_0 + 256 ] = x1
# asm 1: vmovupd <x1=reg256#14,256(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,256(<input_0=%rdi)
vmovupd %ymm13, 256(%rdi)
# qhasm: mem256[ input_0 + 512 ] = x2
# asm 1: vmovupd <x2=reg256#15,512(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,512(<input_0=%rdi)
vmovupd %ymm14, 512(%rdi)
# qhasm: mem256[ input_0 + 768 ] = x3
# asm 1: vmovupd <x3=reg256#11,768(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,768(<input_0=%rdi)
vmovupd %ymm10, 768(%rdi)
# qhasm: mem256[ input_0 + 1024 ] = x4
# asm 1: vmovupd <x4=reg256#12,1024(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1024(<input_0=%rdi)
vmovupd %ymm11, 1024(%rdi)
# qhasm: mem256[ input_0 + 1280 ] = x5
# asm 1: vmovupd <x5=reg256#9,1280(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1280(<input_0=%rdi)
vmovupd %ymm8, 1280(%rdi)
# qhasm: mem256[ input_0 + 1536 ] = x6
# asm 1: vmovupd <x6=reg256#13,1536(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1536(<input_0=%rdi)
vmovupd %ymm12, 1536(%rdi)
# qhasm: mem256[ input_0 + 1792 ] = x7
# asm 1: vmovupd <x7=reg256#7,1792(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1792(<input_0=%rdi)
vmovupd %ymm6, 1792(%rdi)
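# NOTE: the eight vectors are stored back in place, and the same three-stage
# pass is then repeated for the next 32-byte column of the 64x256-bit matrix
# (offsets 32, 288, ..., 1824 below).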
# qhasm: x0 = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 32(<input_0=%rdi),>x0=%ymm6
vmovupd 32( % rdi), % ymm6
# qhasm: x1 = mem256[ input_0 + 288 ]
# asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7
vmovupd 288( % rdi), % ymm7
# qhasm: x2 = mem256[ input_0 + 544 ]
# asm 1: vmovupd 544(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 544(<input_0=%rdi),>x2=%ymm8
vmovupd 544( % rdi), % ymm8
# qhasm: x3 = mem256[ input_0 + 800 ]
# asm 1: vmovupd 800(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 800(<input_0=%rdi),>x3=%ymm9
vmovupd 800( % rdi), % ymm9
# qhasm: x4 = mem256[ input_0 + 1056 ]
# asm 1: vmovupd 1056(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1056(<input_0=%rdi),>x4=%ymm10
vmovupd 1056( % rdi), % ymm10
# qhasm: x5 = mem256[ input_0 + 1312 ]
# asm 1: vmovupd 1312(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1312(<input_0=%rdi),>x5=%ymm11
vmovupd 1312( % rdi), % ymm11
# qhasm: x6 = mem256[ input_0 + 1568 ]
# asm 1: vmovupd 1568(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1568(<input_0=%rdi),>x6=%ymm12
vmovupd 1568( % rdi), % ymm12
# qhasm: x7 = mem256[ input_0 + 1824 ]
# asm 1: vmovupd 1824(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1824(<input_0=%rdi),>x7=%ymm13
vmovupd 1824( % rdi), % ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand % ymm6, % ymm0, % ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, % ymm10, % ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, % ymm6, % ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand % ymm10, % ymm1, % ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor % ymm14, % ymm15, % ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor % ymm6, % ymm10, % ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand % ymm7, % ymm0, % ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, % ymm11, % ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, % ymm7, % ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand % ymm11, % ymm1, % ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor % ymm10, % ymm15, % ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor % ymm7, % ymm11, % ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand % ymm8, % ymm0, % ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, % ymm12, % ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, % ymm8, % ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand % ymm12, % ymm1, % ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 32 ] = x0
# asm 1: vmovupd <x0=reg256#10,32(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,32(<input_0=%rdi)
vmovupd %ymm9, 32(%rdi)
# qhasm: mem256[ input_0 + 288 ] = x1
# asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi)
vmovupd %ymm13, 288(%rdi)
# qhasm: mem256[ input_0 + 544 ] = x2
# asm 1: vmovupd <x2=reg256#15,544(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,544(<input_0=%rdi)
vmovupd %ymm14, 544(%rdi)
# qhasm: mem256[ input_0 + 800 ] = x3
# asm 1: vmovupd <x3=reg256#11,800(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,800(<input_0=%rdi)
vmovupd %ymm10, 800(%rdi)
# qhasm: mem256[ input_0 + 1056 ] = x4
# asm 1: vmovupd <x4=reg256#12,1056(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1056(<input_0=%rdi)
vmovupd %ymm11, 1056(%rdi)
# qhasm: mem256[ input_0 + 1312 ] = x5
# asm 1: vmovupd <x5=reg256#9,1312(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1312(<input_0=%rdi)
vmovupd %ymm8, 1312(%rdi)
# qhasm: mem256[ input_0 + 1568 ] = x6
# asm 1: vmovupd <x6=reg256#13,1568(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1568(<input_0=%rdi)
vmovupd %ymm12, 1568(%rdi)
# qhasm: mem256[ input_0 + 1824 ] = x7
# asm 1: vmovupd <x7=reg256#7,1824(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1824(<input_0=%rdi)
vmovupd %ymm6, 1824(%rdi)
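# ---------------------------------------------------------------------------
# Summary comment (added; inferred from the qhasm annotations above and below):
# each round of this routine loads eight 256-bit rows spaced 256 bytes apart,
# interleaves them in three stages using the mask registers %ymm0-%ymm5
# (mask0/mask1 with 32-bit shifts, mask2/mask3 with 16-bit shifts, and
# mask4/mask5 with 8-bit shifts), and stores the permuted rows back in place.
# The rounds below repeat the same pattern for base offsets 64, 96, 128, and
# 160 from input_0 (%rdi).
# ---------------------------------------------------------------------------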
# qhasm: x0 = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 64(<input_0=%rdi),>x0=%ymm6
vmovupd 64(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 320 ]
# asm 1: vmovupd 320(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 320(<input_0=%rdi),>x1=%ymm7
vmovupd 320(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 576 ]
# asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8
vmovupd 576(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 832 ]
# asm 1: vmovupd 832(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 832(<input_0=%rdi),>x3=%ymm9
vmovupd 832(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1088 ]
# asm 1: vmovupd 1088(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1088(<input_0=%rdi),>x4=%ymm10
vmovupd 1088(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1344 ]
# asm 1: vmovupd 1344(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1344(<input_0=%rdi),>x5=%ymm11
vmovupd 1344(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1600 ]
# asm 1: vmovupd 1600(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1600(<input_0=%rdi),>x6=%ymm12
vmovupd 1600(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1856 ]
# asm 1: vmovupd 1856(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1856(<input_0=%rdi),>x7=%ymm13
vmovupd 1856(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 64 ] = x0
# asm 1: vmovupd <x0=reg256#10,64(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,64(<input_0=%rdi)
vmovupd %ymm9, 64(%rdi)
# qhasm: mem256[ input_0 + 320 ] = x1
# asm 1: vmovupd <x1=reg256#14,320(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,320(<input_0=%rdi)
vmovupd %ymm13, 320(%rdi)
# qhasm: mem256[ input_0 + 576 ] = x2
# asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi)
vmovupd %ymm14, 576(%rdi)
# qhasm: mem256[ input_0 + 832 ] = x3
# asm 1: vmovupd <x3=reg256#11,832(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,832(<input_0=%rdi)
vmovupd %ymm10, 832(%rdi)
# qhasm: mem256[ input_0 + 1088 ] = x4
# asm 1: vmovupd <x4=reg256#12,1088(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1088(<input_0=%rdi)
vmovupd %ymm11, 1088(%rdi)
# qhasm: mem256[ input_0 + 1344 ] = x5
# asm 1: vmovupd <x5=reg256#9,1344(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1344(<input_0=%rdi)
vmovupd %ymm8, 1344(%rdi)
# qhasm: mem256[ input_0 + 1600 ] = x6
# asm 1: vmovupd <x6=reg256#13,1600(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1600(<input_0=%rdi)
vmovupd %ymm12, 1600(%rdi)
# qhasm: mem256[ input_0 + 1856 ] = x7
# asm 1: vmovupd <x7=reg256#7,1856(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1856(<input_0=%rdi)
vmovupd %ymm6, 1856(%rdi)
# qhasm: x0 = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 96(<input_0=%rdi),>x0=%ymm6
vmovupd 96(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 352 ]
# asm 1: vmovupd 352(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 352(<input_0=%rdi),>x1=%ymm7
vmovupd 352(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 608 ]
# asm 1: vmovupd 608(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 608(<input_0=%rdi),>x2=%ymm8
vmovupd 608(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 864 ]
# asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
vmovupd 864(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1120 ]
# asm 1: vmovupd 1120(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1120(<input_0=%rdi),>x4=%ymm10
vmovupd 1120(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1376 ]
# asm 1: vmovupd 1376(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1376(<input_0=%rdi),>x5=%ymm11
vmovupd 1376(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1632 ]
# asm 1: vmovupd 1632(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1632(<input_0=%rdi),>x6=%ymm12
vmovupd 1632(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1888 ]
# asm 1: vmovupd 1888(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1888(<input_0=%rdi),>x7=%ymm13
vmovupd 1888(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 96 ] = x0
# asm 1: vmovupd <x0=reg256#10,96(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,96(<input_0=%rdi)
vmovupd %ymm9, 96(%rdi)
# qhasm: mem256[ input_0 + 352 ] = x1
# asm 1: vmovupd <x1=reg256#14,352(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,352(<input_0=%rdi)
vmovupd %ymm13, 352(%rdi)
# qhasm: mem256[ input_0 + 608 ] = x2
# asm 1: vmovupd <x2=reg256#15,608(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,608(<input_0=%rdi)
vmovupd %ymm14, 608(%rdi)
# qhasm: mem256[ input_0 + 864 ] = x3
# asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
vmovupd %ymm10, 864(%rdi)
# qhasm: mem256[ input_0 + 1120 ] = x4
# asm 1: vmovupd <x4=reg256#12,1120(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1120(<input_0=%rdi)
vmovupd %ymm11, 1120(%rdi)
# qhasm: mem256[ input_0 + 1376 ] = x5
# asm 1: vmovupd <x5=reg256#9,1376(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1376(<input_0=%rdi)
vmovupd %ymm8, 1376(%rdi)
# qhasm: mem256[ input_0 + 1632 ] = x6
# asm 1: vmovupd <x6=reg256#13,1632(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1632(<input_0=%rdi)
vmovupd %ymm12, 1632(%rdi)
# qhasm: mem256[ input_0 + 1888 ] = x7
# asm 1: vmovupd <x7=reg256#7,1888(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1888(<input_0=%rdi)
vmovupd %ymm6, 1888(%rdi)
# qhasm: x0 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 128(<input_0=%rdi),>x0=%ymm6
vmovupd 128(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 384(<input_0=%rdi),>x1=%ymm7
vmovupd 384(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 640 ]
# asm 1: vmovupd 640(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 640(<input_0=%rdi),>x2=%ymm8
vmovupd 640(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 896 ]
# asm 1: vmovupd 896(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 896(<input_0=%rdi),>x3=%ymm9
vmovupd 896(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1152 ]
# asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
vmovupd 1152(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1408 ]
# asm 1: vmovupd 1408(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1408(<input_0=%rdi),>x5=%ymm11
vmovupd 1408(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1664 ]
# asm 1: vmovupd 1664(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1664(<input_0=%rdi),>x6=%ymm12
vmovupd 1664(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1920 ]
# asm 1: vmovupd 1920(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1920(<input_0=%rdi),>x7=%ymm13
vmovupd 1920(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 128 ] = x0
# asm 1: vmovupd <x0=reg256#10,128(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,128(<input_0=%rdi)
vmovupd %ymm9, 128(%rdi)
# qhasm: mem256[ input_0 + 384 ] = x1
# asm 1: vmovupd <x1=reg256#14,384(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,384(<input_0=%rdi)
vmovupd %ymm13, 384(%rdi)
# qhasm: mem256[ input_0 + 640 ] = x2
# asm 1: vmovupd <x2=reg256#15,640(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,640(<input_0=%rdi)
vmovupd %ymm14, 640(%rdi)
# qhasm: mem256[ input_0 + 896 ] = x3
# asm 1: vmovupd <x3=reg256#11,896(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,896(<input_0=%rdi)
vmovupd %ymm10, 896(%rdi)
# qhasm: mem256[ input_0 + 1152 ] = x4
# asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
vmovupd %ymm11, 1152(%rdi)
# qhasm: mem256[ input_0 + 1408 ] = x5
# asm 1: vmovupd <x5=reg256#9,1408(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1408(<input_0=%rdi)
vmovupd %ymm8, 1408(%rdi)
# qhasm: mem256[ input_0 + 1664 ] = x6
# asm 1: vmovupd <x6=reg256#13,1664(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1664(<input_0=%rdi)
vmovupd %ymm12, 1664(%rdi)
# qhasm: mem256[ input_0 + 1920 ] = x7
# asm 1: vmovupd <x7=reg256#7,1920(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1920(<input_0=%rdi)
vmovupd %ymm6, 1920(%rdi)
# qhasm: x0 = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 160(<input_0=%rdi),>x0=%ymm6
vmovupd 160( % rdi), % ymm6
# qhasm: x1 = mem256[ input_0 + 416 ]
# asm 1: vmovupd 416(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 416(<input_0=%rdi),>x1=%ymm7
vmovupd 416( % rdi), % ymm7
# qhasm: x2 = mem256[ input_0 + 672 ]
# asm 1: vmovupd 672(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 672(<input_0=%rdi),>x2=%ymm8
vmovupd 672( % rdi), % ymm8
# qhasm: x3 = mem256[ input_0 + 928 ]
# asm 1: vmovupd 928(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 928(<input_0=%rdi),>x3=%ymm9
vmovupd 928( % rdi), % ymm9
# qhasm: x4 = mem256[ input_0 + 1184 ]
# asm 1: vmovupd 1184(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1184(<input_0=%rdi),>x4=%ymm10
vmovupd 1184( % rdi), % ymm10
# qhasm: x5 = mem256[ input_0 + 1440 ]
# asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11
vmovupd 1440( % rdi), % ymm11
# qhasm: x6 = mem256[ input_0 + 1696 ]
# asm 1: vmovupd 1696(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1696(<input_0=%rdi),>x6=%ymm12
vmovupd 1696( % rdi), % ymm12
# qhasm: x7 = mem256[ input_0 + 1952 ]
# asm 1: vmovupd 1952(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1952(<input_0=%rdi),>x7=%ymm13
vmovupd 1952( % rdi), % ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand % ymm6, % ymm0, % ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, % ymm10, % ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, % ymm6, % ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand % ymm10, % ymm1, % ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor % ymm14, % ymm15, % ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor % ymm6, % ymm10, % ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand % ymm7, % ymm0, % ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, % ymm11, % ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, % ymm7, % ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand % ymm11, % ymm1, % ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor % ymm10, % ymm15, % ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor % ymm7, % ymm11, % ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand % ymm8, % ymm0, % ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, % ymm12, % ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, % ymm8, % ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand % ymm12, % ymm1, % ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor % ymm11, % ymm15, % ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor % ymm8, % ymm12, % ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand % ymm9, % ymm0, % ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32, % ymm13, % ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, % ymm9, % ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand % ymm13, % ymm1, % ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor % ymm12, % ymm15, % ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor % ymm9, % ymm13, % ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand % ymm14, % ymm2, % ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16, % ymm11, % ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16, % ymm14, % ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand % ymm11, % ymm3, % ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor % ymm13, % ymm15, % ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor % ymm14, % ymm11, % ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand % ymm10, % ymm2, % ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16, % ymm12, % ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, % ymm10, % ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand % ymm12, % ymm3, % ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor % ymm14, % ymm15, % ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor % ymm10, % ymm12, % ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand % ymm6, % ymm2, % ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16, % ymm8, % ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, % ymm6, % ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand % ymm8, % ymm3, % ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor % ymm12, % ymm15, % ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor % ymm6, % ymm8, % ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand % ymm7, % ymm2, % ymm8
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
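# 16-bit stage done; the last stage of this pass swaps the bytes selected
# by mask4/mask5 between x0/x1, x2/x3, x4/x5 and x6/x7 (vpsllw/vpsrlw by 8).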
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
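# Store the eight recombined vectors back into this column slice, at byte
# offsets 160, 416, ..., 1952 (stride 256).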
# qhasm: mem256[ input_0 + 160 ] = x0
# asm 1: vmovupd <x0=reg256#10,160(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,160(<input_0=%rdi)
vmovupd %ymm9, 160(%rdi)
# qhasm: mem256[ input_0 + 416 ] = x1
# asm 1: vmovupd <x1=reg256#14,416(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,416(<input_0=%rdi)
vmovupd %ymm13, 416(%rdi)
# qhasm: mem256[ input_0 + 672 ] = x2
# asm 1: vmovupd <x2=reg256#15,672(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,672(<input_0=%rdi)
vmovupd %ymm14, 672(%rdi)
# qhasm: mem256[ input_0 + 928 ] = x3
# asm 1: vmovupd <x3=reg256#11,928(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,928(<input_0=%rdi)
vmovupd %ymm10, 928(%rdi)
# qhasm: mem256[ input_0 + 1184 ] = x4
# asm 1: vmovupd <x4=reg256#12,1184(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1184(<input_0=%rdi)
vmovupd %ymm11, 1184(%rdi)
# qhasm: mem256[ input_0 + 1440 ] = x5
# asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi)
vmovupd %ymm8, 1440(%rdi)
# qhasm: mem256[ input_0 + 1696 ] = x6
# asm 1: vmovupd <x6=reg256#13,1696(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1696(<input_0=%rdi)
vmovupd %ymm12, 1696(%rdi)
# qhasm: mem256[ input_0 + 1952 ] = x7
# asm 1: vmovupd <x7=reg256#7,1952(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1952(<input_0=%rdi)
vmovupd %ymm6, 1952(%rdi)
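# Next column slice: reload eight vectors from offsets 192, 448, ..., 1984
# and repeat the same 32/16/8-bit interleaving stages.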
# qhasm: x0 = mem256[ input_0 + 192 ]
# asm 1: vmovupd 192(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 192(<input_0=%rdi),>x0=%ymm6
vmovupd 192(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 448 ]
# asm 1: vmovupd 448(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 448(<input_0=%rdi),>x1=%ymm7
vmovupd 448(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 704 ]
# asm 1: vmovupd 704(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 704(<input_0=%rdi),>x2=%ymm8
vmovupd 704(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 960 ]
# asm 1: vmovupd 960(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 960(<input_0=%rdi),>x3=%ymm9
vmovupd 960(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1216 ]
# asm 1: vmovupd 1216(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1216(<input_0=%rdi),>x4=%ymm10
vmovupd 1216(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1472 ]
# asm 1: vmovupd 1472(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1472(<input_0=%rdi),>x5=%ymm11
vmovupd 1472(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1728 ]
# asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12
vmovupd 1728(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1984 ]
# asm 1: vmovupd 1984(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1984(<input_0=%rdi),>x7=%ymm13
vmovupd 1984(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
vpsllq $32, %ymm13, %ymm15
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
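# 32-bit stage done for this slice; 16-bit stage follows.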
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
vpslld $16, %ymm11, %ymm15
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
vpsrld $16, %ymm14, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
vpslld $16, %ymm12, %ymm15
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
vpslld $16, %ymm8, %ymm15
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
vpslld $16, %ymm9, %ymm15
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
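# 16-bit stage done for this slice; byte stage follows.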
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
vpsllw $8, %ymm14, %ymm15
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
vpsrlw $8, %ymm13, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
vpsllw $8, %ymm10, %ymm15
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
vpsllw $8, %ymm8, %ymm15
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
vpsrlw $8, %ymm12, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
vpsllw $8, %ymm7, %ymm15
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 192 ] = x0
# asm 1: vmovupd <x0=reg256#10,192(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,192(<input_0=%rdi)
vmovupd %ymm9, 192(%rdi)
# qhasm: mem256[ input_0 + 448 ] = x1
# asm 1: vmovupd <x1=reg256#14,448(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,448(<input_0=%rdi)
vmovupd %ymm13, 448(%rdi)
# qhasm: mem256[ input_0 + 704 ] = x2
# asm 1: vmovupd <x2=reg256#15,704(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,704(<input_0=%rdi)
vmovupd %ymm14, 704(%rdi)
# qhasm: mem256[ input_0 + 960 ] = x3
# asm 1: vmovupd <x3=reg256#11,960(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,960(<input_0=%rdi)
vmovupd %ymm10, 960(%rdi)
# qhasm: mem256[ input_0 + 1216 ] = x4
# asm 1: vmovupd <x4=reg256#12,1216(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1216(<input_0=%rdi)
vmovupd %ymm11, 1216(%rdi)
# qhasm: mem256[ input_0 + 1472 ] = x5
# asm 1: vmovupd <x5=reg256#9,1472(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1472(<input_0=%rdi)
vmovupd %ymm8, 1472(%rdi)
# qhasm: mem256[ input_0 + 1728 ] = x6
# asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi)
vmovupd %ymm12, 1728(%rdi)
# qhasm: mem256[ input_0 + 1984 ] = x7
# asm 1: vmovupd <x7=reg256#7,1984(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1984(<input_0=%rdi)
vmovupd %ymm6, 1984(%rdi)
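# Final column slice of this pass (offsets 224, 480, ..., 2016). From here
# the mask registers ymm0-ymm5 are gradually reused as scratch once each
# mask has served its last use, so the masks must be reloaded afterwards.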
# qhasm: x0 = mem256[ input_0 + 224 ]
# asm 1: vmovupd 224(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 224(<input_0=%rdi),>x0=%ymm6
vmovupd 224(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 480 ]
# asm 1: vmovupd 480(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 480(<input_0=%rdi),>x1=%ymm7
vmovupd 480(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 736 ]
# asm 1: vmovupd 736(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 736(<input_0=%rdi),>x2=%ymm8
vmovupd 736(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 992 ]
# asm 1: vmovupd 992(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 992(<input_0=%rdi),>x3=%ymm9
vmovupd 992(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1248 ]
# asm 1: vmovupd 1248(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1248(<input_0=%rdi),>x4=%ymm10
vmovupd 1248(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1504 ]
# asm 1: vmovupd 1504(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1504(<input_0=%rdi),>x5=%ymm11
vmovupd 1504(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1760 ]
# asm 1: vmovupd 1760(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1760(<input_0=%rdi),>x6=%ymm12
vmovupd 1760(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 2016 ]
# asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13
vmovupd 2016(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: 4x v10 = x4 << 32
# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
vpsllq $32, %ymm10, %ymm15
# qhasm: 4x v01 = x0 unsigned>> 32
# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
vpsrlq $32, %ymm6, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: 4x v10 = x5 << 32
# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
vpsllq $32, %ymm11, %ymm15
# qhasm: 4x v01 = x1 unsigned>> 32
# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
vpsrlq $32, %ymm7, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: 4x v10 = x6 << 32
# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
vpsllq $32, %ymm12, %ymm15
# qhasm: 4x v01 = x2 unsigned>> 32
# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
vpsrlq $32, %ymm8, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0
vpand %ymm9, %ymm0, %ymm0
# qhasm: 4x v10 = x7 << 32
# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13
# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12
vpsllq $32, %ymm13, %ymm12
# qhasm: 4x v01 = x3 unsigned>> 32
# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
vpsrlq $32, %ymm9, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
vpand %ymm13, %ymm1, %ymm1
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#1,<v10=reg256#13,>x3=reg256#1
# asm 2: vpor <v00=%ymm0,<v10=%ymm12,>x3=%ymm0
vpor %ymm0, %ymm12, %ymm0
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
vpor %ymm9, %ymm1, %ymm1
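# 16-bit stage; note that ymm0/ymm1 now hold x3/x7 instead of mask0/mask1.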
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
vpand %ymm14, %ymm2, %ymm9
# qhasm: 8x v10 = x2 << 16
# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13
# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12
vpslld $16, %ymm11, %ymm12
# qhasm: 8x v01 = x0 unsigned>> 16
# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14
# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13
vpsrld $16, %ymm14, %ymm13
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
vpor %ymm9, %ymm12, %ymm9
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
vpor %ymm13, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
vpand %ymm10, %ymm2, %ymm12
# qhasm: 8x v10 = x3 << 16
# asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14
# asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13
vpslld $16, %ymm0, %ymm13
# qhasm: 8x v01 = x1 unsigned>> 16
# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
vpsrld $16, %ymm10, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
vpand %ymm0, %ymm3, %ymm0
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
vpor %ymm12, %ymm13, %ymm12
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
vpor %ymm10, %ymm0, %ymm0
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
vpand %ymm6, %ymm2, %ymm10
# qhasm: 8x v10 = x6 << 16
# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14
# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13
vpslld $16, %ymm8, %ymm13
# qhasm: 8x v01 = x4 unsigned>> 16
# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
vpsrld $16, %ymm6, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
vpor %ymm10, %ymm13, %ymm10
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2
vpand %ymm7, %ymm2, %ymm2
# qhasm: 8x v10 = x7 << 16
# asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9
# asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8
vpslld $16, %ymm1, %ymm8
# qhasm: 8x v01 = x5 unsigned>> 16
# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
vpsrld $16, %ymm7, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
vpand %ymm1, %ymm3, %ymm1
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#3,<v10=reg256#9,>x5=reg256#3
# asm 2: vpor <v00=%ymm2,<v10=%ymm8,>x5=%ymm2
vpor %ymm2, %ymm8, %ymm2
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
vpor %ymm7, %ymm1, %ymm1
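# Byte stage; mask2 (ymm2) has likewise been recycled and now holds x5.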
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
vpand %ymm9, %ymm4, %ymm3
# qhasm: 16x v10 = x1 << 8
# asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8
# asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7
vpsllw $8, %ymm12, %ymm7
# qhasm: 16x v01 = x0 unsigned>> 8
# asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9
# asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8
vpsrlw $8, %ymm9, %ymm8
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
vpand %ymm12, %ymm5, %ymm9
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
# asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
vpor %ymm3, %ymm7, %ymm3
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
# asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
vpor %ymm8, %ymm9, %ymm7
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
vpand %ymm11, %ymm4, %ymm8
# qhasm: 16x v10 = x3 << 8
# asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10
# asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9
vpsllw $8, %ymm0, %ymm9
# qhasm: 16x v01 = x2 unsigned>> 8
# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
vpsrlw $8, %ymm11, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
vpand %ymm0, %ymm5, %ymm0
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
vpor %ymm8, %ymm9, %ymm8
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
vpor %ymm11, %ymm0, %ymm0
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
vpand %ymm10, %ymm4, %ymm9
# qhasm: 16x v10 = x5 << 8
# asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12
# asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11
vpsllw $8, %ymm2, %ymm11
# qhasm: 16x v01 = x4 unsigned>> 8
# asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11
# asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10
vpsrlw $8, %ymm10, %ymm10
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
vpand %ymm2, %ymm5, %ymm2
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
vpor %ymm9, %ymm11, %ymm9
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
# asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
vpor %ymm10, %ymm2, %ymm2
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4
vpand %ymm6, %ymm4, %ymm4
# qhasm: 16x v10 = x7 << 8
# asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11
# asm 2: vpsllw $8,<x7=%ymm1,>v10=%ymm10
vpsllw $8, %ymm1, %ymm10
# qhasm: 16x v01 = x6 unsigned>> 8
# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
vpsrlw $8, %ymm6, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
vpand %ymm1, %ymm5, %ymm1
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#5,<v10=reg256#11,>x6=reg256#5
# asm 2: vpor <v00=%ymm4,<v10=%ymm10,>x6=%ymm4
vpor %ymm4, %ymm10, %ymm4
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
vpor %ymm6, %ymm1, %ymm1
# qhasm: mem256[ input_0 + 224 ] = x0
# asm 1: vmovupd <x0=reg256#4,224(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm3,224(<input_0=%rdi)
vmovupd %ymm3, 224(%rdi)
# qhasm: mem256[ input_0 + 480 ] = x1
# asm 1: vmovupd <x1=reg256#8,480(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm7,480(<input_0=%rdi)
vmovupd %ymm7, 480(%rdi)
# qhasm: mem256[ input_0 + 736 ] = x2
# asm 1: vmovupd <x2=reg256#9,736(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm8,736(<input_0=%rdi)
vmovupd %ymm8, 736(%rdi)
# qhasm: mem256[ input_0 + 992 ] = x3
# asm 1: vmovupd <x3=reg256#1,992(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm0,992(<input_0=%rdi)
vmovupd %ymm0, 992(%rdi)
# qhasm: mem256[ input_0 + 1248 ] = x4
# asm 1: vmovupd <x4=reg256#10,1248(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm9,1248(<input_0=%rdi)
vmovupd %ymm9, 1248(%rdi)
# qhasm: mem256[ input_0 + 1504 ] = x5
# asm 1: vmovupd <x5=reg256#3,1504(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm2,1504(<input_0=%rdi)
vmovupd %ymm2, 1504(%rdi)
# qhasm: mem256[ input_0 + 1760 ] = x6
# asm 1: vmovupd <x6=reg256#5,1760(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm4,1760(<input_0=%rdi)
vmovupd %ymm4, 1760(%rdi)
# qhasm: mem256[ input_0 + 2016 ] = x7
# asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
vmovupd %ymm1, 2016(%rdi)
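# Second phase: ymm0-ymm5 were clobbered above, so a fresh set of masks
# (MASK2_*, MASK1_*, MASK0_*) is loaded and the same butterfly pattern is
# applied with shift distances of 4, 2 and 1 bit.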
# qhasm: mask0 aligned= mem256[ MASK2_0 ]
# asm 1: vmovapd MASK2_0(%rip),>mask0=reg256#1
# asm 2: vmovapd MASK2_0(%rip),>mask0=%ymm0
vmovapd MASK2_0(%rip), %ymm0
# qhasm: mask1 aligned= mem256[ MASK2_1 ]
# asm 1: vmovapd MASK2_1(%rip),>mask1=reg256#2
# asm 2: vmovapd MASK2_1(%rip),>mask1=%ymm1
vmovapd MASK2_1(%rip), %ymm1
# qhasm: mask2 aligned= mem256[ MASK1_0 ]
# asm 1: vmovapd MASK1_0(%rip),>mask2=reg256#3
# asm 2: vmovapd MASK1_0(%rip),>mask2=%ymm2
vmovapd MASK1_0(%rip), %ymm2
# qhasm: mask3 aligned= mem256[ MASK1_1 ]
# asm 1: vmovapd MASK1_1(%rip),>mask3=reg256#4
# asm 2: vmovapd MASK1_1(%rip),>mask3=%ymm3
vmovapd MASK1_1(%rip), %ymm3
# qhasm: mask4 aligned= mem256[ MASK0_0 ]
# asm 1: vmovapd MASK0_0(%rip),>mask4=reg256#5
# asm 2: vmovapd MASK0_0(%rip),>mask4=%ymm4
vmovapd MASK0_0(%rip), %ymm4
# qhasm: mask5 aligned= mem256[ MASK0_1 ]
# asm 1: vmovapd MASK0_1(%rip),>mask5=reg256#6
# asm 2: vmovapd MASK0_1(%rip),>mask5=%ymm5
vmovapd MASK0_1(%rip), %ymm5
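# This phase processes eight 256-bit vectors at a time; the first batch
# sits at byte offsets 0 through 224.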
# qhasm: x0 = mem256[ input_0 + 0 ]
# asm 1: vmovupd 0(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 0(<input_0=%rdi),>x0=%ymm6
vmovupd 0(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 32 ]
# asm 1: vmovupd 32(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 32(<input_0=%rdi),>x1=%ymm7
vmovupd 32(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 64 ]
# asm 1: vmovupd 64(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 64(<input_0=%rdi),>x2=%ymm8
vmovupd 64(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 96 ]
# asm 1: vmovupd 96(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 96(<input_0=%rdi),>x3=%ymm9
vmovupd 96(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 128 ]
# asm 1: vmovupd 128(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 128(<input_0=%rdi),>x4=%ymm10
vmovupd 128(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 160 ]
# asm 1: vmovupd 160(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 160(<input_0=%rdi),>x5=%ymm11
vmovupd 160(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 192 ]
# asm 1: vmovupd 192(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 192(<input_0=%rdi),>x6=%ymm12
vmovupd 192(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 224 ]
# asm 1: vmovupd 224(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 224(<input_0=%rdi),>x7=%ymm13
vmovupd 224(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
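# 4-bit stage done for this batch; the 2-bit stage below pairs x0/x2,
# x1/x3, x4/x6 and x5/x7.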
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
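# 2-bit stage done; the 1-bit stage below pairs x0/x1, x2/x3, x4/x5 and
# x6/x7.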
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 0 ] = x0
# asm 1: vmovupd <x0=reg256#10,0(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,0(<input_0=%rdi)
vmovupd %ymm9, 0(%rdi)
# qhasm: mem256[ input_0 + 32 ] = x1
# asm 1: vmovupd <x1=reg256#14,32(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,32(<input_0=%rdi)
vmovupd %ymm13, 32(%rdi)
# qhasm: mem256[ input_0 + 64 ] = x2
# asm 1: vmovupd <x2=reg256#15,64(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,64(<input_0=%rdi)
vmovupd %ymm14, 64(%rdi)
# qhasm: mem256[ input_0 + 96 ] = x3
# asm 1: vmovupd <x3=reg256#11,96(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,96(<input_0=%rdi)
vmovupd %ymm10, 96(%rdi)
# qhasm: mem256[ input_0 + 128 ] = x4
# asm 1: vmovupd <x4=reg256#12,128(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,128(<input_0=%rdi)
vmovupd %ymm11, 128(%rdi)
# qhasm: mem256[ input_0 + 160 ] = x5
# asm 1: vmovupd <x5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,160(<input_0=%rdi)
vmovupd %ymm8, 160(%rdi)
# qhasm: mem256[ input_0 + 192 ] = x6
# asm 1: vmovupd <x6=reg256#13,192(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,192(<input_0=%rdi)
vmovupd %ymm12, 192(%rdi)
# qhasm: mem256[ input_0 + 224 ] = x7
# asm 1: vmovupd <x7=reg256#7,224(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,224(<input_0=%rdi)
vmovupd %ymm6, 224(%rdi)
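# Second batch of eight vectors, loaded from byte offsets 256 through 480;
# the 4/2/1-bit stages repeat.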
# qhasm: x0 = mem256[ input_0 + 256 ]
# asm 1: vmovupd 256(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 256(<input_0=%rdi),>x0=%ymm6
vmovupd 256(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 288 ]
# asm 1: vmovupd 288(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 288(<input_0=%rdi),>x1=%ymm7
vmovupd 288(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 320 ]
# asm 1: vmovupd 320(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 320(<input_0=%rdi),>x2=%ymm8
vmovupd 320(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 352 ]
# asm 1: vmovupd 352(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 352(<input_0=%rdi),>x3=%ymm9
vmovupd 352(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 384 ]
# asm 1: vmovupd 384(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 384(<input_0=%rdi),>x4=%ymm10
vmovupd 384(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 416 ]
# asm 1: vmovupd 416(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 416(<input_0=%rdi),>x5=%ymm11
vmovupd 416(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 448 ]
# asm 1: vmovupd 448(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 448(<input_0=%rdi),>x6=%ymm12
vmovupd 448(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 480 ]
# asm 1: vmovupd 480(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 480(<input_0=%rdi),>x7=%ymm13
vmovupd 480(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
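# Shift-by-4 pass done: mask0/mask1-selected bit groups have been
# exchanged within the pairs (x0,x4), (x1,x5), (x2,x6), (x3,x7).
# The next pass repeats the butterfly at distance 2 with mask2/mask3
# on the pairs (x0,x2), (x1,x3), (x4,x6), (x5,x7).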
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
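# Shift-by-2 pass done. The final pass exchanges bits at distance 1
# with mask4/mask5 on the adjacent pairs (x0,x1), (x2,x3), (x4,x5),
# (x6,x7).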
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
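# All three butterfly passes are complete for this slice; x0..x7 are
# stored back to the offsets they were loaded from.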
# qhasm: mem256[ input_0 + 256 ] = x0
# asm 1: vmovupd <x0=reg256#10,256(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,256(<input_0=%rdi)
vmovupd %ymm9, 256(%rdi)
# qhasm: mem256[ input_0 + 288 ] = x1
# asm 1: vmovupd <x1=reg256#14,288(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,288(<input_0=%rdi)
vmovupd %ymm13, 288(%rdi)
# qhasm: mem256[ input_0 + 320 ] = x2
# asm 1: vmovupd <x2=reg256#15,320(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,320(<input_0=%rdi)
vmovupd %ymm14, 320(%rdi)
# qhasm: mem256[ input_0 + 352 ] = x3
# asm 1: vmovupd <x3=reg256#11,352(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,352(<input_0=%rdi)
vmovupd %ymm10, 352(%rdi)
# qhasm: mem256[ input_0 + 384 ] = x4
# asm 1: vmovupd <x4=reg256#12,384(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,384(<input_0=%rdi)
vmovupd %ymm11, 384(%rdi)
# qhasm: mem256[ input_0 + 416 ] = x5
# asm 1: vmovupd <x5=reg256#9,416(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,416(<input_0=%rdi)
vmovupd %ymm8, 416(%rdi)
# qhasm: mem256[ input_0 + 448 ] = x6
# asm 1: vmovupd <x6=reg256#13,448(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,448(<input_0=%rdi)
vmovupd %ymm12, 448(%rdi)
# qhasm: mem256[ input_0 + 480 ] = x7
# asm 1: vmovupd <x7=reg256#7,480(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,480(<input_0=%rdi)
vmovupd %ymm6, 480(%rdi)
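# For reference, each pass above follows the pattern sketched below in
# scalar C (illustration only, not part of the build; maskA/maskB stand
# for the pass's low/high group masks, whose values are assumed to be
# defined earlier in the file):
#
#     /* one butterfly pass at distance s over rows w[0..7] */
#     for (int i = 0; i < 8; i++) {
#         if (i & s) continue;                 /* visit each pair once */
#         uint64_t lo = w[i], hi = w[i + s];
#         w[i]     =  (lo & maskA)       | ((hi & maskA) << s);
#         w[i + s] = ((lo & maskB) >> s) |  (hi & maskB);
#     }
#
# The routine applies this with s = 4, 2, 1 to each 256-byte slice of
# the buffer in turn; the next slice (offsets 512..736) follows.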
# qhasm: x0 = mem256[ input_0 + 512 ]
# asm 1: vmovupd 512(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 512(<input_0=%rdi),>x0=%ymm6
vmovupd 512(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 544 ]
# asm 1: vmovupd 544(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 544(<input_0=%rdi),>x1=%ymm7
vmovupd 544(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 576 ]
# asm 1: vmovupd 576(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 576(<input_0=%rdi),>x2=%ymm8
vmovupd 576(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 608 ]
# asm 1: vmovupd 608(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 608(<input_0=%rdi),>x3=%ymm9
vmovupd 608(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 640 ]
# asm 1: vmovupd 640(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 640(<input_0=%rdi),>x4=%ymm10
vmovupd 640(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 672 ]
# asm 1: vmovupd 672(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 672(<input_0=%rdi),>x5=%ymm11
vmovupd 672(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 704 ]
# asm 1: vmovupd 704(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 704(<input_0=%rdi),>x6=%ymm12
vmovupd 704(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 736 ]
# asm 1: vmovupd 736(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 736(<input_0=%rdi),>x7=%ymm13
vmovupd 736(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 512 ] = x0
# asm 1: vmovupd <x0=reg256#10,512(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,512(<input_0=%rdi)
vmovupd %ymm9, 512(%rdi)
# qhasm: mem256[ input_0 + 544 ] = x1
# asm 1: vmovupd <x1=reg256#14,544(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,544(<input_0=%rdi)
vmovupd %ymm13, 544(%rdi)
# qhasm: mem256[ input_0 + 576 ] = x2
# asm 1: vmovupd <x2=reg256#15,576(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,576(<input_0=%rdi)
vmovupd %ymm14, 576(%rdi)
# qhasm: mem256[ input_0 + 608 ] = x3
# asm 1: vmovupd <x3=reg256#11,608(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,608(<input_0=%rdi)
vmovupd %ymm10, 608(%rdi)
# qhasm: mem256[ input_0 + 640 ] = x4
# asm 1: vmovupd <x4=reg256#12,640(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,640(<input_0=%rdi)
vmovupd %ymm11, 640(%rdi)
# qhasm: mem256[ input_0 + 672 ] = x5
# asm 1: vmovupd <x5=reg256#9,672(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,672(<input_0=%rdi)
vmovupd %ymm8, 672(%rdi)
# qhasm: mem256[ input_0 + 704 ] = x6
# asm 1: vmovupd <x6=reg256#13,704(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,704(<input_0=%rdi)
vmovupd %ymm12, 704(%rdi)
# qhasm: mem256[ input_0 + 736 ] = x7
# asm 1: vmovupd <x7=reg256#7,736(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,736(<input_0=%rdi)
vmovupd %ymm6, 736(%rdi)
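# Slice at offsets 512..736 done; the same three-pass butterfly is now
# applied to the slice at offsets 768..992.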
# qhasm: x0 = mem256[ input_0 + 768 ]
# asm 1: vmovupd 768(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 768(<input_0=%rdi),>x0=%ymm6
vmovupd 768(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 800 ]
# asm 1: vmovupd 800(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 800(<input_0=%rdi),>x1=%ymm7
vmovupd 800(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 832 ]
# asm 1: vmovupd 832(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 832(<input_0=%rdi),>x2=%ymm8
vmovupd 832(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 864 ]
# asm 1: vmovupd 864(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 864(<input_0=%rdi),>x3=%ymm9
vmovupd 864(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 896 ]
# asm 1: vmovupd 896(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 896(<input_0=%rdi),>x4=%ymm10
vmovupd 896(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 928 ]
# asm 1: vmovupd 928(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 928(<input_0=%rdi),>x5=%ymm11
vmovupd 928(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 960 ]
# asm 1: vmovupd 960(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 960(<input_0=%rdi),>x6=%ymm12
vmovupd 960(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 992 ]
# asm 1: vmovupd 992(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 992(<input_0=%rdi),>x7=%ymm13
vmovupd 992(%rdi), %ymm13
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
# qhasm: mem256[ input_0 + 768 ] = x0
# asm 1: vmovupd <x0=reg256#10,768(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,768(<input_0=%rdi)
vmovupd %ymm9, 768(%rdi)
# qhasm: mem256[ input_0 + 800 ] = x1
# asm 1: vmovupd <x1=reg256#14,800(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,800(<input_0=%rdi)
vmovupd %ymm13, 800(%rdi)
# qhasm: mem256[ input_0 + 832 ] = x2
# asm 1: vmovupd <x2=reg256#15,832(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,832(<input_0=%rdi)
vmovupd %ymm14, 832(%rdi)
# qhasm: mem256[ input_0 + 864 ] = x3
# asm 1: vmovupd <x3=reg256#11,864(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,864(<input_0=%rdi)
vmovupd %ymm10, 864(%rdi)
# qhasm: mem256[ input_0 + 896 ] = x4
# asm 1: vmovupd <x4=reg256#12,896(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,896(<input_0=%rdi)
vmovupd %ymm11, 896(%rdi)
# qhasm: mem256[ input_0 + 928 ] = x5
# asm 1: vmovupd <x5=reg256#9,928(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,928(<input_0=%rdi)
vmovupd %ymm8, 928(%rdi)
# qhasm: mem256[ input_0 + 960 ] = x6
# asm 1: vmovupd <x6=reg256#13,960(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,960(<input_0=%rdi)
vmovupd %ymm12, 960(%rdi)
# qhasm: mem256[ input_0 + 992 ] = x7
# asm 1: vmovupd <x7=reg256#7,992(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,992(<input_0=%rdi)
vmovupd %ymm6, 992(%rdi)
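# Slice at offsets 768..992 done; the routine continues with the slice
# at offsets 1024..1248.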
# qhasm: x0 = mem256[ input_0 + 1024 ]
# asm 1: vmovupd 1024(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1024(<input_0=%rdi),>x0=%ymm6
vmovupd 1024(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 1056 ]
# asm 1: vmovupd 1056(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1056(<input_0=%rdi),>x1=%ymm7
vmovupd 1056(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 1088 ]
# asm 1: vmovupd 1088(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1088(<input_0=%rdi),>x2=%ymm8
vmovupd 1088(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 1120 ]
# asm 1: vmovupd 1120(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1120(<input_0=%rdi),>x3=%ymm9
vmovupd 1120(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1152 ]
# asm 1: vmovupd 1152(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1152(<input_0=%rdi),>x4=%ymm10
vmovupd 1152(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1184 ]
# asm 1: vmovupd 1184(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1184(<input_0=%rdi),>x5=%ymm11
vmovupd 1184(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1216 ]
# asm 1: vmovupd 1216(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1216(<input_0=%rdi),>x6=%ymm12
vmovupd 1216(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1248 ]
# asm 1: vmovupd 1248(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1248(<input_0=%rdi),>x7=%ymm13
vmovupd 1248(%rdi), %ymm13
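# Rows x0..x7 of the slice at offsets 1024..1248 are loaded; the same
# butterfly passes follow.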
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
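# Stage 3 below finishes this block: mask4/mask5 with 1-bit shifts swap
# single bits between adjacent rows (x0/x1, x2/x3, x4/x5, x6/x7).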
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
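# All three swap stages are done; the eight transposed 256-bit rows are
# written out to byte offsets 1024..1248 from input_0.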
# qhasm: mem256[ input_0 + 1024 ] = x0
# asm 1: vmovupd <x0=reg256#10,1024(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,1024(<input_0=%rdi)
vmovupd %ymm9, 1024(%rdi)
# qhasm: mem256[ input_0 + 1056 ] = x1
# asm 1: vmovupd <x1=reg256#14,1056(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,1056(<input_0=%rdi)
vmovupd %ymm13, 1056(%rdi)
# qhasm: mem256[ input_0 + 1088 ] = x2
# asm 1: vmovupd <x2=reg256#15,1088(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,1088(<input_0=%rdi)
vmovupd %ymm14, 1088(%rdi)
# qhasm: mem256[ input_0 + 1120 ] = x3
# asm 1: vmovupd <x3=reg256#11,1120(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,1120(<input_0=%rdi)
vmovupd %ymm10, 1120(%rdi)
# qhasm: mem256[ input_0 + 1152 ] = x4
# asm 1: vmovupd <x4=reg256#12,1152(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1152(<input_0=%rdi)
vmovupd %ymm11, 1152(%rdi)
# qhasm: mem256[ input_0 + 1184 ] = x5
# asm 1: vmovupd <x5=reg256#9,1184(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1184(<input_0=%rdi)
vmovupd %ymm8, 1184(%rdi)
# qhasm: mem256[ input_0 + 1216 ] = x6
# asm 1: vmovupd <x6=reg256#13,1216(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1216(<input_0=%rdi)
vmovupd %ymm12, 1216(%rdi)
# qhasm: mem256[ input_0 + 1248 ] = x7
# asm 1: vmovupd <x7=reg256#7,1248(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1248(<input_0=%rdi)
vmovupd %ymm6, 1248(%rdi)
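# Load the next block of eight 256-bit rows, at offsets 1280..1504; it
# goes through the same three-stage bit transpose below.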
# qhasm: x0 = mem256[ input_0 + 1280 ]
# asm 1: vmovupd 1280(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1280(<input_0=%rdi),>x0=%ymm6
vmovupd 1280(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 1312 ]
# asm 1: vmovupd 1312(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1312(<input_0=%rdi),>x1=%ymm7
vmovupd 1312(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 1344 ]
# asm 1: vmovupd 1344(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1344(<input_0=%rdi),>x2=%ymm8
vmovupd 1344(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 1376 ]
# asm 1: vmovupd 1376(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1376(<input_0=%rdi),>x3=%ymm9
vmovupd 1376(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1408 ]
# asm 1: vmovupd 1408(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1408(<input_0=%rdi),>x4=%ymm10
vmovupd 1408(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1440 ]
# asm 1: vmovupd 1440(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1440(<input_0=%rdi),>x5=%ymm11
vmovupd 1440(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1472 ]
# asm 1: vmovupd 1472(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1472(<input_0=%rdi),>x6=%ymm12
vmovupd 1472(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1504 ]
# asm 1: vmovupd 1504(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1504(<input_0=%rdi),>x7=%ymm13
vmovupd 1504(%rdi), %ymm13
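# Each block goes through the same three delta-swap stages. An illustrative
# scalar sketch of one such swap, in plain C on 64-bit words (`lo`, `hi`,
# `mask_lo`, `mask_hi` and `shift` are hypothetical names; the vector code
# below applies the same idea per 64-bit lane of the ymm registers):
#
#     uint64_t t = (lo & mask_lo) | ((hi & mask_lo) << shift);
#     hi         = ((lo & mask_hi) >> shift) | (hi & mask_hi);
#     lo         = t;
#
# Stage 1 below uses shift = 4 with mask0/mask1 on rows four apart.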
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
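# Stage 2 for this block: mask2/mask3, 2-bit shifts, rows two apart.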
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
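# Stage 3 for this block: mask4/mask5, 1-bit shifts, adjacent rows.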
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
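# Store the transposed rows of this block back to offsets 1280..1504.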
# qhasm: mem256[ input_0 + 1280 ] = x0
# asm 1: vmovupd <x0=reg256#10,1280(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,1280(<input_0=%rdi)
vmovupd %ymm9, 1280(%rdi)
# qhasm: mem256[ input_0 + 1312 ] = x1
# asm 1: vmovupd <x1=reg256#14,1312(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,1312(<input_0=%rdi)
vmovupd %ymm13, 1312(%rdi)
# qhasm: mem256[ input_0 + 1344 ] = x2
# asm 1: vmovupd <x2=reg256#15,1344(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,1344(<input_0=%rdi)
vmovupd %ymm14, 1344(%rdi)
# qhasm: mem256[ input_0 + 1376 ] = x3
# asm 1: vmovupd <x3=reg256#11,1376(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,1376(<input_0=%rdi)
vmovupd %ymm10, 1376(%rdi)
# qhasm: mem256[ input_0 + 1408 ] = x4
# asm 1: vmovupd <x4=reg256#12,1408(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1408(<input_0=%rdi)
vmovupd %ymm11, 1408(%rdi)
# qhasm: mem256[ input_0 + 1440 ] = x5
# asm 1: vmovupd <x5=reg256#9,1440(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1440(<input_0=%rdi)
vmovupd %ymm8, 1440(%rdi)
# qhasm: mem256[ input_0 + 1472 ] = x6
# asm 1: vmovupd <x6=reg256#13,1472(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1472(<input_0=%rdi)
vmovupd %ymm12, 1472(%rdi)
# qhasm: mem256[ input_0 + 1504 ] = x7
# asm 1: vmovupd <x7=reg256#7,1504(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1504(<input_0=%rdi)
vmovupd %ymm6, 1504(%rdi)
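# Load the next block of eight rows, at offsets 1536..1760.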
# qhasm: x0 = mem256[ input_0 + 1536 ]
# asm 1: vmovupd 1536(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1536(<input_0=%rdi),>x0=%ymm6
vmovupd 1536(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 1568 ]
# asm 1: vmovupd 1568(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1568(<input_0=%rdi),>x1=%ymm7
vmovupd 1568(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 1600 ]
# asm 1: vmovupd 1600(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1600(<input_0=%rdi),>x2=%ymm8
vmovupd 1600(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 1632 ]
# asm 1: vmovupd 1632(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1632(<input_0=%rdi),>x3=%ymm9
vmovupd 1632(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1664 ]
# asm 1: vmovupd 1664(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1664(<input_0=%rdi),>x4=%ymm10
vmovupd 1664(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1696 ]
# asm 1: vmovupd 1696(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1696(<input_0=%rdi),>x5=%ymm11
vmovupd 1696(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1728 ]
# asm 1: vmovupd 1728(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1728(<input_0=%rdi),>x6=%ymm12
vmovupd 1728(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 1760 ]
# asm 1: vmovupd 1760(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 1760(<input_0=%rdi),>x7=%ymm13
vmovupd 1760(%rdi), %ymm13
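# Stage 1 for this block: mask0/mask1, 4-bit shifts, rows four apart.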
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
vpand %ymm13, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
vpand %ymm13, %ymm1, %ymm13
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
# asm 2: vpor <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
vpor %ymm9, %ymm13, %ymm9
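# Stage 2 for this block: mask2/mask3, 2-bit shifts, rows two apart.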
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
vpand %ymm14, %ymm2, %ymm13
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
vpand %ymm11, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
vpand %ymm14, %ymm3, %ymm14
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
vpsrlq $2, %ymm14, %ymm14
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
# asm 2: vpor <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
vpor %ymm13, %ymm15, %ymm13
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
vpor %ymm14, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
vpand %ymm10, %ymm2, %ymm14
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
vpand %ymm12, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
vpand %ymm12, %ymm3, %ymm12
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
# asm 2: vpor <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
vpor %ymm10, %ymm12, %ymm10
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
vpand %ymm6, %ymm2, %ymm12
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
vpand %ymm8, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
vpand %ymm9, %ymm2, %ymm15
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
vpsllq $2, %ymm15, %ymm15
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
vpand %ymm9, %ymm3, %ymm9
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
vpor %ymm8, %ymm15, %ymm8
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
vpor %ymm7, %ymm9, %ymm7
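# Stage 3 for this block: mask4/mask5, 1-bit shifts, adjacent rows.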
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
vpand %ymm13, %ymm4, %ymm9
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
vpand %ymm14, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
vpand %ymm13, %ymm5, %ymm13
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
vpand %ymm14, %ymm5, %ymm14
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
vpsrlq $1, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
vpor %ymm9, %ymm15, %ymm9
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
# asm 2: vpor <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
vpor %ymm13, %ymm14, %ymm13
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
vpand %ymm11, %ymm4, %ymm14
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
vpand %ymm10, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
# asm 2: vpor <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
vpor %ymm11, %ymm10, %ymm10
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
vpand %ymm12, %ymm4, %ymm11
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
vpand %ymm8, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
vpand %ymm12, %ymm5, %ymm12
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
vpand %ymm8, %ymm5, %ymm8
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
vpsrlq $1, %ymm12, %ymm12
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
# asm 2: vpor <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
vpor %ymm12, %ymm8, %ymm8
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
vpand %ymm6, %ymm4, %ymm12
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
vpand %ymm7, %ymm4, %ymm15
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
vpsllq $1, %ymm15, %ymm15
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
vpand %ymm7, %ymm5, %ymm7
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
vpor %ymm12, %ymm15, %ymm12
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
vpor %ymm6, %ymm7, %ymm6
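# Store the transposed rows of this block back to offsets 1536..1760.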
# qhasm: mem256[ input_0 + 1536 ] = x0
# asm 1: vmovupd <x0=reg256#10,1536(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm9,1536(<input_0=%rdi)
vmovupd %ymm9, 1536(%rdi)
# qhasm: mem256[ input_0 + 1568 ] = x1
# asm 1: vmovupd <x1=reg256#14,1568(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm13,1568(<input_0=%rdi)
vmovupd %ymm13, 1568(%rdi)
# qhasm: mem256[ input_0 + 1600 ] = x2
# asm 1: vmovupd <x2=reg256#15,1600(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm14,1600(<input_0=%rdi)
vmovupd %ymm14, 1600(%rdi)
# qhasm: mem256[ input_0 + 1632 ] = x3
# asm 1: vmovupd <x3=reg256#11,1632(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm10,1632(<input_0=%rdi)
vmovupd %ymm10, 1632(%rdi)
# qhasm: mem256[ input_0 + 1664 ] = x4
# asm 1: vmovupd <x4=reg256#12,1664(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm11,1664(<input_0=%rdi)
vmovupd %ymm11, 1664(%rdi)
# qhasm: mem256[ input_0 + 1696 ] = x5
# asm 1: vmovupd <x5=reg256#9,1696(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm8,1696(<input_0=%rdi)
vmovupd %ymm8, 1696(%rdi)
# qhasm: mem256[ input_0 + 1728 ] = x6
# asm 1: vmovupd <x6=reg256#13,1728(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm12,1728(<input_0=%rdi)
vmovupd %ymm12, 1728(%rdi)
# qhasm: mem256[ input_0 + 1760 ] = x7
# asm 1: vmovupd <x7=reg256#7,1760(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm6,1760(<input_0=%rdi)
vmovupd %ymm6, 1760(%rdi)
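# Load the final block of eight rows, at offsets 1792..2016.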
# qhasm: x0 = mem256[ input_0 + 1792 ]
# asm 1: vmovupd 1792(<input_0=int64#1),>x0=reg256#7
# asm 2: vmovupd 1792(<input_0=%rdi),>x0=%ymm6
vmovupd 1792(%rdi), %ymm6
# qhasm: x1 = mem256[ input_0 + 1824 ]
# asm 1: vmovupd 1824(<input_0=int64#1),>x1=reg256#8
# asm 2: vmovupd 1824(<input_0=%rdi),>x1=%ymm7
vmovupd 1824(%rdi), %ymm7
# qhasm: x2 = mem256[ input_0 + 1856 ]
# asm 1: vmovupd 1856(<input_0=int64#1),>x2=reg256#9
# asm 2: vmovupd 1856(<input_0=%rdi),>x2=%ymm8
vmovupd 1856(%rdi), %ymm8
# qhasm: x3 = mem256[ input_0 + 1888 ]
# asm 1: vmovupd 1888(<input_0=int64#1),>x3=reg256#10
# asm 2: vmovupd 1888(<input_0=%rdi),>x3=%ymm9
vmovupd 1888(%rdi), %ymm9
# qhasm: x4 = mem256[ input_0 + 1920 ]
# asm 1: vmovupd 1920(<input_0=int64#1),>x4=reg256#11
# asm 2: vmovupd 1920(<input_0=%rdi),>x4=%ymm10
vmovupd 1920(%rdi), %ymm10
# qhasm: x5 = mem256[ input_0 + 1952 ]
# asm 1: vmovupd 1952(<input_0=int64#1),>x5=reg256#12
# asm 2: vmovupd 1952(<input_0=%rdi),>x5=%ymm11
vmovupd 1952(%rdi), %ymm11
# qhasm: x6 = mem256[ input_0 + 1984 ]
# asm 1: vmovupd 1984(<input_0=int64#1),>x6=reg256#13
# asm 2: vmovupd 1984(<input_0=%rdi),>x6=%ymm12
vmovupd 1984(%rdi), %ymm12
# qhasm: x7 = mem256[ input_0 + 2016 ]
# asm 1: vmovupd 2016(<input_0=int64#1),>x7=reg256#14
# asm 2: vmovupd 2016(<input_0=%rdi),>x7=%ymm13
vmovupd 2016(%rdi), %ymm13
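# Stage 1 for the final block. From here on the mask registers are reused
# as scratch once a mask has served its last use, e.g. v10 of the x3/x7
# swap below lands in ymm0, which until then held mask0.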
# qhasm: v00 = x0 & mask0
# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
vpand %ymm6, %ymm0, %ymm14
# qhasm: v10 = x4 & mask0
# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
vpand %ymm10, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x0 & mask1
# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
vpand %ymm6, %ymm1, %ymm6
# qhasm: v11 = x4 & mask1
# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
vpand %ymm10, %ymm1, %ymm10
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
vpsrlq $4, %ymm6, %ymm6
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
# asm 2: vpor <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
vpor %ymm14, %ymm15, %ymm14
# qhasm: x4 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
vpor %ymm6, %ymm10, %ymm6
# qhasm: v00 = x1 & mask0
# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
vpand %ymm7, %ymm0, %ymm10
# qhasm: v10 = x5 & mask0
# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
vpand %ymm11, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x1 & mask1
# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
vpand %ymm7, %ymm1, %ymm7
# qhasm: v11 = x5 & mask1
# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
vpand %ymm11, %ymm1, %ymm11
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
vpsrlq $4, %ymm7, %ymm7
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
vpor %ymm10, %ymm15, %ymm10
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
# asm 2: vpor <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
vpor %ymm7, %ymm11, %ymm7
# qhasm: v00 = x2 & mask0
# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
vpand %ymm8, %ymm0, %ymm11
# qhasm: v10 = x6 & mask0
# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
vpand %ymm12, %ymm0, %ymm15
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
vpsllq $4, %ymm15, %ymm15
# qhasm: v01 = x2 & mask1
# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
vpand %ymm8, %ymm1, %ymm8
# qhasm: v11 = x6 & mask1
# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
vpand %ymm12, %ymm1, %ymm12
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
vpsrlq $4, %ymm8, %ymm8
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
# asm 2: vpor <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
vpor %ymm11, %ymm15, %ymm11
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
# asm 2: vpor <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
vpor %ymm8, %ymm12, %ymm8
# qhasm: v00 = x3 & mask0
# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
vpand %ymm9, %ymm0, %ymm12
# qhasm: v10 = x7 & mask0
# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1
# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0
vpand %ymm13, %ymm0, %ymm0
# qhasm: 4x v10 <<= 4
# asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1
# asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0
vpsllq $4, %ymm0, %ymm0
# qhasm: v01 = x3 & mask1
# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
vpand %ymm9, %ymm1, %ymm9
# qhasm: v11 = x7 & mask1
# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
vpand %ymm13, %ymm1, %ymm1
# qhasm: 4x v01 unsigned>>= 4
# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
vpsrlq $4, %ymm9, %ymm9
# qhasm: x3 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#1,>x3=reg256#1
# asm 2: vpor <v00=%ymm12,<v10=%ymm0,>x3=%ymm0
vpor %ymm12, %ymm0, %ymm0
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
vpor %ymm9, %ymm1, %ymm1
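# Stage 2 for the final block; mask2 (ymm2) is likewise consumed as
# scratch at its last use here, and mask3 (ymm3) is reused in stage 3.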
# qhasm: v00 = x0 & mask2
# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
vpand %ymm14, %ymm2, %ymm9
# qhasm: v10 = x2 & mask2
# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13
# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12
vpand %ymm11, %ymm2, %ymm12
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13
# asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12
vpsllq $2, %ymm12, %ymm12
# qhasm: v01 = x0 & mask3
# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14
# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13
vpand %ymm14, %ymm3, %ymm13
# qhasm: v11 = x2 & mask3
# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
vpand %ymm11, %ymm3, %ymm11
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14
# asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13
vpsrlq $2, %ymm13, %ymm13
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
vpor %ymm9, %ymm12, %ymm9
# qhasm: x2 = v01 | v11
# asm 1: vpor <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
# asm 2: vpor <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
vpor %ymm13, %ymm11, %ymm11
# qhasm: v00 = x1 & mask2
# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
vpand %ymm10, %ymm2, %ymm12
# qhasm: v10 = x3 & mask2
# asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14
# asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13
vpand %ymm0, %ymm2, %ymm13
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
vpsllq $2, %ymm13, %ymm13
# qhasm: v01 = x1 & mask3
# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
vpand %ymm10, %ymm3, %ymm10
# qhasm: v11 = x3 & mask3
# asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
vpand %ymm0, %ymm3, %ymm0
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
vpsrlq $2, %ymm10, %ymm10
# qhasm: x1 = v00 | v10
# asm 1: vpor <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
# asm 2: vpor <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
vpor %ymm12, %ymm13, %ymm12
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
vpor %ymm10, %ymm0, %ymm0
# qhasm: v00 = x4 & mask2
# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
vpand %ymm6, %ymm2, %ymm10
# qhasm: v10 = x6 & mask2
# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14
# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13
vpand %ymm8, %ymm2, %ymm13
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
vpsllq $2, %ymm13, %ymm13
# qhasm: v01 = x4 & mask3
# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
vpand %ymm6, %ymm3, %ymm6
# qhasm: v11 = x6 & mask3
# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
vpand %ymm8, %ymm3, %ymm8
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
vpsrlq $2, %ymm6, %ymm6
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
# asm 2: vpor <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
vpor %ymm10, %ymm13, %ymm10
# qhasm: x6 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
# asm 2: vpor <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
vpor %ymm6, %ymm8, %ymm6
# qhasm: v00 = x5 & mask2
# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
vpand %ymm7, %ymm2, %ymm8
# qhasm: v10 = x7 & mask2
# asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3
# asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2
vpand %ymm1, %ymm2, %ymm2
# qhasm: 4x v10 <<= 2
# asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3
# asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2
vpsllq $2, %ymm2, %ymm2
# qhasm: v01 = x5 & mask3
# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
vpand %ymm7, %ymm3, %ymm7
# qhasm: v11 = x7 & mask3
# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
vpand %ymm1, %ymm3, %ymm1
# qhasm: 4x v01 unsigned>>= 2
# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
vpsrlq $2, %ymm7, %ymm7
# qhasm: x5 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#3,>x5=reg256#3
# asm 2: vpor <v00=%ymm8,<v10=%ymm2,>x5=%ymm2
vpor %ymm8, %ymm2, %ymm2
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
vpor %ymm7, %ymm1, %ymm1
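# Shift-2 stage done: 2-bit groups exchanged between (x0,x2), (x1,x3),
# (x4,x6), (x5,x7) via mask2/mask3. The shift-1 stage below swaps
# adjacent bits between (x0,x1), (x2,x3), (x4,x5), (x6,x7) via
# mask4/mask5, the usual masked-swap ladder of a bit-matrix transpose.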
# qhasm: v00 = x0 & mask4
# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
vpand %ymm9, %ymm4, %ymm3
# qhasm: v10 = x1 & mask4
# asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8
# asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7
vpand %ymm12, %ymm4, %ymm7
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8
# asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7
vpsllq $1, %ymm7, %ymm7
# qhasm: v01 = x0 & mask5
# asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9
# asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8
vpand %ymm9, %ymm5, %ymm8
# qhasm: v11 = x1 & mask5
# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
vpand %ymm12, %ymm5, %ymm9
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9
# asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8
vpsrlq $1, %ymm8, %ymm8
# qhasm: x0 = v00 | v10
# asm 1: vpor <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
# asm 2: vpor <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
vpor %ymm3, %ymm7, %ymm3
# qhasm: x1 = v01 | v11
# asm 1: vpor <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
# asm 2: vpor <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
vpor %ymm8, %ymm9, %ymm7
# qhasm: v00 = x2 & mask4
# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
vpand %ymm11, %ymm4, %ymm8
# qhasm: v10 = x3 & mask4
# asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10
# asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9
vpand %ymm0, %ymm4, %ymm9
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10
# asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9
vpsllq $1, %ymm9, %ymm9
# qhasm: v01 = x2 & mask5
# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
vpand %ymm11, %ymm5, %ymm11
# qhasm: v11 = x3 & mask5
# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
vpand %ymm0, %ymm5, %ymm0
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
vpsrlq $1, %ymm11, %ymm11
# qhasm: x2 = v00 | v10
# asm 1: vpor <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
# asm 2: vpor <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
vpor %ymm8, %ymm9, %ymm8
# qhasm: x3 = v01 | v11
# asm 1: vpor <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
# asm 2: vpor <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
vpor %ymm11, %ymm0, %ymm0
# qhasm: v00 = x4 & mask4
# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
vpand %ymm10, %ymm4, %ymm9
# qhasm: v10 = x5 & mask4
# asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12
# asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11
vpand %ymm2, %ymm4, %ymm11
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12
# asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11
vpsllq $1, %ymm11, %ymm11
# qhasm: v01 = x4 & mask5
# asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11
# asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10
vpand %ymm10, %ymm5, %ymm10
# qhasm: v11 = x5 & mask5
# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
vpand %ymm2, %ymm5, %ymm2
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11
# asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10
vpsrlq $1, %ymm10, %ymm10
# qhasm: x4 = v00 | v10
# asm 1: vpor <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
# asm 2: vpor <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
vpor %ymm9, %ymm11, %ymm9
# qhasm: x5 = v01 | v11
# asm 1: vpor <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
# asm 2: vpor <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
vpor %ymm10, %ymm2, %ymm2
# qhasm: v00 = x6 & mask4
# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11
# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10
vpand %ymm6, %ymm4, %ymm10
# qhasm: v10 = x7 & mask4
# asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5
# asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4
vpand %ymm1, %ymm4, %ymm4
# qhasm: 4x v10 <<= 1
# asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5
# asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4
vpsllq $1, %ymm4, %ymm4
# qhasm: v01 = x6 & mask5
# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
vpand %ymm6, %ymm5, %ymm6
# qhasm: v11 = x7 & mask5
# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
vpand %ymm1, %ymm5, %ymm1
# qhasm: 4x v01 unsigned>>= 1
# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
vpsrlq $1, %ymm6, %ymm6
# qhasm: x6 = v00 | v10
# asm 1: vpor <v00=reg256#11,<v10=reg256#5,>x6=reg256#5
# asm 2: vpor <v00=%ymm10,<v10=%ymm4,>x6=%ymm4
vpor %ymm10, %ymm4, %ymm4
# qhasm: x7 = v01 | v11
# asm 1: vpor <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
# asm 2: vpor <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
vpor %ymm6, %ymm1, %ymm1
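# All swaps complete; the eight transposed 256-bit rows are stored back
# to input_0 at byte offsets 1792 through 2016.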
# qhasm: mem256[ input_0 + 1792 ] = x0
# asm 1: vmovupd <x0=reg256#4,1792(<input_0=int64#1)
# asm 2: vmovupd <x0=%ymm3,1792(<input_0=%rdi)
vmovupd %ymm3, 1792(%rdi)
# qhasm: mem256[ input_0 + 1824 ] = x1
# asm 1: vmovupd <x1=reg256#8,1824(<input_0=int64#1)
# asm 2: vmovupd <x1=%ymm7,1824(<input_0=%rdi)
vmovupd %ymm7, 1824(%rdi)
# qhasm: mem256[ input_0 + 1856 ] = x2
# asm 1: vmovupd <x2=reg256#9,1856(<input_0=int64#1)
# asm 2: vmovupd <x2=%ymm8,1856(<input_0=%rdi)
vmovupd %ymm8, 1856(%rdi)
# qhasm: mem256[ input_0 + 1888 ] = x3
# asm 1: vmovupd <x3=reg256#1,1888(<input_0=int64#1)
# asm 2: vmovupd <x3=%ymm0,1888(<input_0=%rdi)
vmovupd %ymm0, 1888(%rdi)
# qhasm: mem256[ input_0 + 1920 ] = x4
# asm 1: vmovupd <x4=reg256#10,1920(<input_0=int64#1)
# asm 2: vmovupd <x4=%ymm9,1920(<input_0=%rdi)
vmovupd %ymm9, 1920(%rdi)
# qhasm: mem256[ input_0 + 1952 ] = x5
# asm 1: vmovupd <x5=reg256#3,1952(<input_0=int64#1)
# asm 2: vmovupd <x5=%ymm2,1952(<input_0=%rdi)
vmovupd %ymm2, 1952(%rdi)
# qhasm: mem256[ input_0 + 1984 ] = x6
# asm 1: vmovupd <x6=reg256#5,1984(<input_0=int64#1)
# asm 2: vmovupd <x6=%ymm4,1984(<input_0=%rdi)
vmovupd %ymm4, 1984(%rdi)
# qhasm: mem256[ input_0 + 2016 ] = x7
# asm 1: vmovupd <x7=reg256#2,2016(<input_0=int64#1)
# asm 2: vmovupd <x7=%ymm1,2016(<input_0=%rdi)
vmovupd %ymm1, 2016(%rdi)
# qhasm: return
add %r11, %rsp
ret
# next file: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_kem/mceliece8192128/avx2/vec256_maa_asm.S
#include "namespace.h"
#define vec256_maa_asm CRYPTO_NAMESPACE(vec256_maa_asm)
#define _vec256_maa_asm _CRYPTO_NAMESPACE(vec256_maa_asm)
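# vec256_maa_asm(r = input_0, a = input_1, b = input_2) appears to be a
# bitsliced multiply-and-accumulate over GF(2^13): each operand is one
# field element per bit position, stored as 13 vec256 limbs (limb k at
# byte offset 32*k). The qhasm-generated body below ANDs each a-limb
# with all 13 b-limbs, XORs the partial products into r0..r24, and
# folds each completed high limb back down modulo the mceliece8192128
# field polynomial x^13 + x^4 + x^3 + x + 1. The final XOR of the
# result into memory at input_0 (the accumulate step suggested by the
# "maa" name) lies past the end of this excerpt.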
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: reg256 a0
# qhasm: reg256 a1
# qhasm: reg256 a2
# qhasm: reg256 a3
# qhasm: reg256 a4
# qhasm: reg256 a5
# qhasm: reg256 a6
# qhasm: reg256 a7
# qhasm: reg256 a8
# qhasm: reg256 a9
# qhasm: reg256 a10
# qhasm: reg256 a11
# qhasm: reg256 a12
# qhasm: reg256 b0
# qhasm: reg256 b1
# qhasm: reg256 r0
# qhasm: reg256 r1
# qhasm: reg256 r2
# qhasm: reg256 r3
# qhasm: reg256 r4
# qhasm: reg256 r5
# qhasm: reg256 r6
# qhasm: reg256 r7
# qhasm: reg256 r8
# qhasm: reg256 r9
# qhasm: reg256 r10
# qhasm: reg256 r11
# qhasm: reg256 r12
# qhasm: reg256 r13
# qhasm: reg256 r14
# qhasm: reg256 r15
# qhasm: reg256 r16
# qhasm: reg256 r17
# qhasm: reg256 r18
# qhasm: reg256 r19
# qhasm: reg256 r20
# qhasm: reg256 r21
# qhasm: reg256 r22
# qhasm: reg256 r23
# qhasm: reg256 r24
# qhasm: reg256 r
# qhasm: enter vec256_maa_asm
.p2align 5
.global _vec256_maa_asm
.global vec256_maa_asm
_vec256_maa_asm:
vec256_maa_asm:
mov %rsp, %r11
and $31, %r11
add $0, %r11
sub %r11, %rsp
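# qhasm prologue: %r11 = (%rsp mod 32) plus 0 bytes of local stack; the
# subtraction aligns %rsp to 32 bytes, and the matching "add %r11, %rsp"
# before ret restores it.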
# qhasm: b0 = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1
# asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0
vmovupd 0(%rdx), %ymm0
# qhasm: a12 = mem256[ input_1 + 384 ]
# asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2
# asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1
vmovupd 384(%rsi), %ymm1
# qhasm: r12 = a12 & b0
# asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3
# asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2
vpand %ymm1, %ymm0, %ymm2
# qhasm: r13 = a12 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4
# asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3
vpand 32(%rdx), %ymm1, %ymm3
# qhasm: r14 = a12 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5
# asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4
vpand 64(%rdx), %ymm1, %ymm4
# qhasm: r15 = a12 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6
# asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5
vpand 96(%rdx), %ymm1, %ymm5
# qhasm: r16 = a12 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7
# asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6
vpand 128(%rdx), %ymm1, %ymm6
# qhasm: r17 = a12 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8
# asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7
vpand 160(%rdx), %ymm1, %ymm7
# qhasm: r18 = a12 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9
# asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8
vpand 192(%rdx), %ymm1, %ymm8
# qhasm: r19 = a12 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10
# asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9
vpand 224(%rdx), %ymm1, %ymm9
# qhasm: r20 = a12 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11
# asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10
vpand 256(%rdx), %ymm1, %ymm10
# qhasm: r21 = a12 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12
# asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11
vpand 288(%rdx), %ymm1, %ymm11
# qhasm: r22 = a12 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13
# asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12
vpand 320(%rdx), %ymm1, %ymm12
# qhasm: r23 = a12 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13
vpand 352(%rdx), %ymm1, %ymm13
# qhasm: r24 = a12 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2
# asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1
vpand 384(%rdx), %ymm1, %ymm1
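# r24 = a12*b12 is the degree-24 partial product; the XORs below fold
# it into the low limbs, since x^24 = x^11 * (x^4 + x^3 + x + 1)
# = x^15 + x^14 + x^12 + x^11 (mod x^13 + x^4 + x^3 + x + 1).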
# qhasm: r15 ^= r24
# asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5
vpxor %ymm1, %ymm5, %ymm5
# qhasm: r14 ^= r24
# asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4
vpxor %ymm1, %ymm4, %ymm4
# qhasm: r12 ^= r24
# asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2
vpxor %ymm1, %ymm2, %ymm2
# qhasm: r11 = r24
# asm 1: vmovapd <r24=reg256#2,>r11=reg256#2
# asm 2: vmovapd <r24=%ymm1,>r11=%ymm1
vmovapd %ymm1, %ymm1
# qhasm: a11 = mem256[ input_1 + 352 ]
# asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15
# asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14
vmovupd 352(%rsi), %ymm14
# qhasm: r = a11 & b0
# asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a11 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a11 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a11 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a11 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a11 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a11 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a11 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a11 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a11 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a11 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a11 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a11 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r23 ^= r
# asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14
# asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13
vpxor %ymm14, %ymm13, %ymm13
# qhasm: r14 ^= r23
# asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4
vpxor %ymm13, %ymm4, %ymm4
# qhasm: r13 ^= r23
# asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3
vpxor %ymm13, %ymm3, %ymm3
# qhasm: r11 ^= r23
# asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1
vpxor %ymm13, %ymm1, %ymm1
# qhasm: r10 = r23
# asm 1: vmovapd <r23=reg256#14,>r10=reg256#14
# asm 2: vmovapd <r23=%ymm13,>r10=%ymm13
vmovapd %ymm13, %ymm13
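# Every remaining block has the same shape: load the next a-limb, AND
# it against all 13 b-limbs, XOR the products into the running r-limbs,
# then fold the newly completed top limb (r23, r22, ...) through the
# same reduction polynomial.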
# qhasm: a10 = mem256[ input_1 + 320 ]
# asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15
# asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14
vmovupd 320(%rsi), %ymm14
# qhasm: r = a10 & b0
# asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a10 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a10 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a10 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a10 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a10 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a10 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a10 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a10 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a10 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a10 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a10 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a10 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12
vpxor %ymm14, %ymm12, %ymm12
# qhasm: r13 ^= r22
# asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3
vpxor %ymm12, %ymm3, %ymm3
# qhasm: r12 ^= r22
# asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2
vpxor %ymm12, %ymm2, %ymm2
# qhasm: r10 ^= r22
# asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13
vpxor %ymm12, %ymm13, %ymm13
# qhasm: r9 = r22
# asm 1: vmovapd <r22=reg256#13,>r9=reg256#13
# asm 2: vmovapd <r22=%ymm12,>r9=%ymm12
vmovapd %ymm12, %ymm12
# qhasm: a9 = mem256[ input_1 + 288 ]
# asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15
# asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14
vmovupd 288(%rsi), %ymm14
# qhasm: r = a9 & b0
# asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a9 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a9 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a9 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a9 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a9 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a9 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a9 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a9 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a9 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a9 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a9 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a9 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11
vpxor %ymm14, %ymm11, %ymm11
# qhasm: r12 ^= r21
# asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2
vpxor %ymm11, %ymm2, %ymm2
# qhasm: r11 ^= r21
# asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1
vpxor %ymm11, %ymm1, %ymm1
# qhasm: r9 ^= r21
# asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12
vpxor %ymm11, %ymm12, %ymm12
# qhasm: r8 = r21
# asm 1: vmovapd <r21=reg256#12,>r8=reg256#12
# asm 2: vmovapd <r21=%ymm11,>r8=%ymm11
vmovapd %ymm11, %ymm11
# qhasm: a8 = mem256[ input_1 + 256 ]
# asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15
# asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14
vmovupd 256(%rsi), %ymm14
# qhasm: r = a8 & b0
# asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a8 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a8 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a8 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a8 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a8 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a8 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a8 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a8 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a8 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a8 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a8 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a8 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10
vpxor %ymm14, %ymm10, %ymm10
# qhasm: r11 ^= r20
# asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1
vpxor %ymm10, %ymm1, %ymm1
# qhasm: r10 ^= r20
# asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13
vpxor %ymm10, %ymm13, %ymm13
# qhasm: r8 ^= r20
# asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11
vpxor %ymm10, %ymm11, %ymm11
# qhasm: r7 = r20
# asm 1: vmovapd <r20=reg256#11,>r7=reg256#11
# asm 2: vmovapd <r20=%ymm10,>r7=%ymm10
vmovapd %ymm10, %ymm10
# qhasm: a7 = mem256[ input_1 + 224 ]
# asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15
# asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14
vmovupd 224(%rsi), %ymm14
# qhasm: r = a7 & b0
# asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a7 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a7 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a7 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a7 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a7 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a7 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a7 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a7 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a7 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a7 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a7 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a7 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9
vpxor %ymm14, %ymm9, %ymm9
# qhasm: r10 ^= r19
# asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13
vpxor %ymm9, %ymm13, %ymm13
# qhasm: r9 ^= r19
# asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12
vpxor %ymm9, %ymm12, %ymm12
# qhasm: r7 ^= r19
# asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10
vpxor %ymm9, %ymm10, %ymm10
# qhasm: r6 = r19
# asm 1: vmovapd <r19=reg256#10,>r6=reg256#10
# asm 2: vmovapd <r19=%ymm9,>r6=%ymm9
vmovapd %ymm9, %ymm9
# qhasm: a6 = mem256[ input_1 + 192 ]
# asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15
# asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14
vmovupd 192(%rsi), %ymm14
# qhasm: r = a6 & b0
# asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a6 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a6 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a6 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a6 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a6 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a6 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a6 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a6 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a6 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a6 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a6 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a6 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8
vpxor %ymm14, %ymm8, %ymm8
# qhasm: r9 ^= r18
# asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12
vpxor %ymm8, %ymm12, %ymm12
# qhasm: r8 ^= r18
# asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11
vpxor %ymm8, %ymm11, %ymm11
# qhasm: r6 ^= r18
# asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9
vpxor %ymm8, %ymm9, %ymm9
# qhasm: r5 = r18
# asm 1: vmovapd <r18=reg256#9,>r5=reg256#9
# asm 2: vmovapd <r18=%ymm8,>r5=%ymm8
vmovapd %ymm8, %ymm8
# qhasm: a5 = mem256[ input_1 + 160 ]
# asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15
# asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14
vmovupd 160(%rsi), %ymm14
# qhasm: r = a5 & b0
# asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a5 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a5 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a5 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a5 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a5 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a5 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a5 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a5 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a5 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a5 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a5 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a5 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7
vpxor %ymm14, %ymm7, %ymm7
# qhasm: r8 ^= r17
# asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11
vpxor %ymm7, %ymm11, %ymm11
# qhasm: r7 ^= r17
# asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10
vpxor %ymm7, %ymm10, %ymm10
# qhasm: r5 ^= r17
# asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8
vpxor %ymm7, %ymm8, %ymm8
# qhasm: r4 = r17
# asm 1: vmovapd <r17=reg256#8,>r4=reg256#8
# asm 2: vmovapd <r17=%ymm7,>r4=%ymm7
vmovapd %ymm7, %ymm7
# qhasm: a4 = mem256[ input_1 + 128 ]
# asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15
# asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14
vmovupd 128(%rsi), %ymm14
# qhasm: r = a4 & b0
# asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a4 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a4 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a4 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a4 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a4 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a4 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a4 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a4 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a4 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a4 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a4 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a4 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6
vpxor %ymm14, %ymm6, %ymm6
# qhasm: r7 ^= r16
# asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10
vpxor %ymm6, %ymm10, %ymm10
# qhasm: r6 ^= r16
# asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9
vpxor %ymm6, %ymm9, %ymm9
# qhasm: r4 ^= r16
# asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7
vpxor %ymm6, %ymm7, %ymm7
# qhasm: r3 = r16
# asm 1: vmovapd <r16=reg256#7,>r3=reg256#7
# asm 2: vmovapd <r16=%ymm6,>r3=%ymm6
vmovapd %ymm6, %ymm6
# qhasm: a3 = mem256[ input_1 + 96 ]
# asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15
# asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14
vmovupd 96(%rsi), %ymm14
# qhasm: r = a3 & b0
# asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a3 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a3 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a3 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a3 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a3 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a3 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a3 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a3 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a3 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a3 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a3 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a3 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5
vpxor %ymm14, %ymm5, %ymm5
# qhasm: r6 ^= r15
# asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9
vpxor %ymm5, %ymm9, %ymm9
# qhasm: r5 ^= r15
# asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8
vpxor %ymm5, %ymm8, %ymm8
# qhasm: r3 ^= r15
# asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6
vpxor %ymm5, %ymm6, %ymm6
# qhasm: r2 = r15
# asm 1: vmovapd <r15=reg256#6,>r2=reg256#6
# asm 2: vmovapd <r15=%ymm5,>r2=%ymm5
vmovapd %ymm5, %ymm5
# qhasm: a2 = mem256[ input_1 + 64 ]
# asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15
# asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14
vmovupd 64(%rsi), %ymm14
# qhasm: r = a2 & b0
# asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a2 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a2 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a2 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a2 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a2 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a2 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a2 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a2 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a2 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a2 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a2 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15, %ymm3, %ymm3
# qhasm: r = a2 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4
vpxor %ymm14, %ymm4, %ymm4
# qhasm: r5 ^= r14
# asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8
vpxor %ymm4, %ymm8, %ymm8
# qhasm: r4 ^= r14
# asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7
vpxor %ymm4, %ymm7, %ymm7
# qhasm: r2 ^= r14
# asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5
vpxor %ymm4, %ymm5, %ymm5
# qhasm: r1 = r14
# asm 1: vmovapd <r14=reg256#5,>r1=reg256#5
# asm 2: vmovapd <r14=%ymm4,>r1=%ymm4
vmovapd %ymm4, %ymm4
# qhasm: a1 = mem256[ input_1 + 32 ]
# asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15
# asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14
vmovupd 32(%rsi), %ymm14
# qhasm: r = a1 & b0
# asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14, %ymm0, %ymm15
# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4
vpxor %ymm15, %ymm4, %ymm4
# qhasm: r = a1 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 32(%rdx), %ymm14, %ymm15
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor %ymm15, %ymm5, %ymm5
# qhasm: r = a1 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 64(%rdx), %ymm14, %ymm15
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15, %ymm6, %ymm6
# qhasm: r = a1 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 96(%rdx), %ymm14, %ymm15
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15, %ymm7, %ymm7
# qhasm: r = a1 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 128(%rdx), %ymm14, %ymm15
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15, %ymm8, %ymm8
# qhasm: r = a1 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 160(%rdx), %ymm14, %ymm15
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15, %ymm9, %ymm9
# qhasm: r = a1 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 192(%rdx), %ymm14, %ymm15
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15, %ymm10, %ymm10
# qhasm: r = a1 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 224(%rdx), %ymm14, %ymm15
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15, %ymm11, %ymm11
# qhasm: r = a1 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 256(%rdx), %ymm14, %ymm15
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15, %ymm12, %ymm12
# qhasm: r = a1 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 288(%rdx), %ymm14, %ymm15
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15, %ymm13, %ymm13
# qhasm: r = a1 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 320(%rdx), %ymm14, %ymm15
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15, %ymm1, %ymm1
# qhasm: r = a1 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 352(%rdx), %ymm14, %ymm15
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15, %ymm2, %ymm2
# qhasm: r = a1 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14
vpand 384(%rdx), %ymm14, %ymm14
# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3
vpxor %ymm14, %ymm3, %ymm3
# qhasm: r4 ^= r13
# asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7
vpxor %ymm3, %ymm7, %ymm7
# qhasm: r3 ^= r13
# asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6
vpxor %ymm3, %ymm6, %ymm6
# qhasm: r1 ^= r13
# asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4
vpxor %ymm3, %ymm4, %ymm4
# qhasm: r0 = r13
# asm 1: vmovapd <r13=reg256#4,>r0=reg256#4
# asm 2: vmovapd <r13=%ymm3,>r0=%ymm3
vmovapd %ymm3, %ymm3
# qhasm: a0 = mem256[ input_1 + 0 ]
# asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15
# asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14
vmovupd 0(%rsi), %ymm14
# qhasm: r = a0 & b0
# asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1
# asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0
vpand %ymm14, %ymm0, %ymm0
# qhasm: r0 ^= r
# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor %ymm0, %ymm3, %ymm3
# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 32(%rdx), %ymm14, %ymm0
# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor %ymm0, %ymm4, %ymm4
# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 64(%rdx), %ymm14, %ymm0
# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor %ymm0, %ymm5, %ymm5
# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 96(%rdx), %ymm14, %ymm0
# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor %ymm0, %ymm6, %ymm6
# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 128(%rdx), %ymm14, %ymm0
# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor %ymm0, %ymm7, %ymm7
# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 160(%rdx), %ymm14, %ymm0
# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor %ymm0, %ymm8, %ymm8
# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 192(%rdx), %ymm14, %ymm0
# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor %ymm0, %ymm9, %ymm9
# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 224(%rdx), %ymm14, %ymm0
# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor %ymm0, %ymm10, %ymm10
# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 256(%rdx), %ymm14, %ymm0
# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor %ymm0, %ymm11, %ymm11
# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 288(%rdx), %ymm14, %ymm0
# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor %ymm0, %ymm12, %ymm12
# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 320(%rdx), %ymm14, %ymm0
# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
vpxor %ymm0, %ymm13, %ymm13
# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352(%rdx), %ymm14, %ymm0
# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor %ymm0, %ymm1, %ymm1
# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384(%rdx), %ymm14, %ymm0
# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor %ymm0, %ymm2, %ymm2
# qhasm: r12 = r12 ^ mem256[ input_0 + 384 ]
# asm 1: vpxor 384(<input_0=int64#1),<r12=reg256#3,>r12=reg256#1
# asm 2: vpxor 384(<input_0=%rdi),<r12=%ymm2,>r12=%ymm0
vpxor 384(%rdi), %ymm2, %ymm0
# qhasm: mem256[ input_0 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#1,384(<input_0=int64#1)
# asm 2: vmovupd <r12=%ymm0,384(<input_0=%rdi)
vmovupd %ymm0, 384(%rdi)
# qhasm: r12 = r12 ^ mem256[ input_1 + 384 ]
# asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#1,>r12=reg256#1
# asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm0,>r12=%ymm0
vpxor 384(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#1,384(<input_1=int64#2)
# asm 2: vmovupd <r12=%ymm0,384(<input_1=%rsi)
vmovupd %ymm0, 384(%rsi)
# qhasm: r11 = r11 ^ mem256[ input_0 + 352 ]
# asm 1: vpxor 352(<input_0=int64#1),<r11=reg256#2,>r11=reg256#1
# asm 2: vpxor 352(<input_0=%rdi),<r11=%ymm1,>r11=%ymm0
vpxor 352(%rdi), %ymm1, %ymm0
# qhasm: mem256[ input_0 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#1,352(<input_0=int64#1)
# asm 2: vmovupd <r11=%ymm0,352(<input_0=%rdi)
vmovupd %ymm0, 352(%rdi)
# qhasm: r11 = r11 ^ mem256[ input_1 + 352 ]
# asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#1,>r11=reg256#1
# asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm0,>r11=%ymm0
vpxor 352(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#1,352(<input_1=int64#2)
# asm 2: vmovupd <r11=%ymm0,352(<input_1=%rsi)
vmovupd %ymm0, 352(%rsi)
# qhasm: r10 = r10 ^ mem256[ input_0 + 320 ]
# asm 1: vpxor 320(<input_0=int64#1),<r10=reg256#14,>r10=reg256#1
# asm 2: vpxor 320(<input_0=%rdi),<r10=%ymm13,>r10=%ymm0
vpxor 320(%rdi), %ymm13, %ymm0
# qhasm: mem256[ input_0 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#1,320(<input_0=int64#1)
# asm 2: vmovupd <r10=%ymm0,320(<input_0=%rdi)
vmovupd %ymm0, 320(%rdi)
# qhasm: r10 = r10 ^ mem256[ input_1 + 320 ]
# asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#1,>r10=reg256#1
# asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm0,>r10=%ymm0
vpxor 320(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#1,320(<input_1=int64#2)
# asm 2: vmovupd <r10=%ymm0,320(<input_1=%rsi)
vmovupd %ymm0, 320(%rsi)
# qhasm: r9 = r9 ^ mem256[ input_0 + 288 ]
# asm 1: vpxor 288(<input_0=int64#1),<r9=reg256#13,>r9=reg256#1
# asm 2: vpxor 288(<input_0=%rdi),<r9=%ymm12,>r9=%ymm0
vpxor 288(%rdi), %ymm12, %ymm0
# qhasm: mem256[ input_0 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#1,288(<input_0=int64#1)
# asm 2: vmovupd <r9=%ymm0,288(<input_0=%rdi)
vmovupd %ymm0, 288(%rdi)
# qhasm: r9 = r9 ^ mem256[ input_1 + 288 ]
# asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#1,>r9=reg256#1
# asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm0,>r9=%ymm0
vpxor 288(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#1,288(<input_1=int64#2)
# asm 2: vmovupd <r9=%ymm0,288(<input_1=%rsi)
vmovupd %ymm0, 288(%rsi)
# qhasm: r8 = r8 ^ mem256[ input_0 + 256 ]
# asm 1: vpxor 256(<input_0=int64#1),<r8=reg256#12,>r8=reg256#1
# asm 2: vpxor 256(<input_0=%rdi),<r8=%ymm11,>r8=%ymm0
vpxor 256(%rdi), %ymm11, %ymm0
# qhasm: mem256[ input_0 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#1,256(<input_0=int64#1)
# asm 2: vmovupd <r8=%ymm0,256(<input_0=%rdi)
vmovupd %ymm0, 256(%rdi)
# qhasm: r8 = r8 ^ mem256[ input_1 + 256 ]
# asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#1,>r8=reg256#1
# asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm0,>r8=%ymm0
vpxor 256(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#1,256(<input_1=int64#2)
# asm 2: vmovupd <r8=%ymm0,256(<input_1=%rsi)
vmovupd %ymm0, 256(%rsi)
# qhasm: r7 = r7 ^ mem256[ input_0 + 224 ]
# asm 1: vpxor 224(<input_0=int64#1),<r7=reg256#11,>r7=reg256#1
# asm 2: vpxor 224(<input_0=%rdi),<r7=%ymm10,>r7=%ymm0
vpxor 224(%rdi), %ymm10, %ymm0
# qhasm: mem256[ input_0 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#1,224(<input_0=int64#1)
# asm 2: vmovupd <r7=%ymm0,224(<input_0=%rdi)
vmovupd %ymm0, 224(%rdi)
# qhasm: r7 = r7 ^ mem256[ input_1 + 224 ]
# asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#1,>r7=reg256#1
# asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm0,>r7=%ymm0
vpxor 224(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#1,224(<input_1=int64#2)
# asm 2: vmovupd <r7=%ymm0,224(<input_1=%rsi)
vmovupd %ymm0, 224(%rsi)
# qhasm: r6 = r6 ^ mem256[ input_0 + 192 ]
# asm 1: vpxor 192(<input_0=int64#1),<r6=reg256#10,>r6=reg256#1
# asm 2: vpxor 192(<input_0=%rdi),<r6=%ymm9,>r6=%ymm0
vpxor 192(%rdi), %ymm9, %ymm0
# qhasm: mem256[ input_0 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#1,192(<input_0=int64#1)
# asm 2: vmovupd <r6=%ymm0,192(<input_0=%rdi)
vmovupd %ymm0, 192(%rdi)
# qhasm: r6 = r6 ^ mem256[ input_1 + 192 ]
# asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#1,>r6=reg256#1
# asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm0,>r6=%ymm0
vpxor 192(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#1,192(<input_1=int64#2)
# asm 2: vmovupd <r6=%ymm0,192(<input_1=%rsi)
vmovupd %ymm0, 192(%rsi)
# qhasm: r5 = r5 ^ mem256[ input_0 + 160 ]
# asm 1: vpxor 160(<input_0=int64#1),<r5=reg256#9,>r5=reg256#1
# asm 2: vpxor 160(<input_0=%rdi),<r5=%ymm8,>r5=%ymm0
vpxor 160(%rdi), %ymm8, %ymm0
# qhasm: mem256[ input_0 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#1,160(<input_0=int64#1)
# asm 2: vmovupd <r5=%ymm0,160(<input_0=%rdi)
vmovupd %ymm0, 160(%rdi)
# qhasm: r5 = r5 ^ mem256[ input_1 + 160 ]
# asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#1,>r5=reg256#1
# asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm0,>r5=%ymm0
vpxor 160(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#1,160(<input_1=int64#2)
# asm 2: vmovupd <r5=%ymm0,160(<input_1=%rsi)
vmovupd %ymm0, 160(%rsi)
# qhasm: r4 = r4 ^ mem256[ input_0 + 128 ]
# asm 1: vpxor 128(<input_0=int64#1),<r4=reg256#8,>r4=reg256#1
# asm 2: vpxor 128(<input_0=%rdi),<r4=%ymm7,>r4=%ymm0
vpxor 128(%rdi), %ymm7, %ymm0
# qhasm: mem256[ input_0 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#1,128(<input_0=int64#1)
# asm 2: vmovupd <r4=%ymm0,128(<input_0=%rdi)
vmovupd %ymm0, 128(%rdi)
# qhasm: r4 = r4 ^ mem256[ input_1 + 128 ]
# asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#1,>r4=reg256#1
# asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm0,>r4=%ymm0
vpxor 128(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#1,128(<input_1=int64#2)
# asm 2: vmovupd <r4=%ymm0,128(<input_1=%rsi)
vmovupd %ymm0, 128(%rsi)
# qhasm: r3 = r3 ^ mem256[ input_0 + 96 ]
# asm 1: vpxor 96(<input_0=int64#1),<r3=reg256#7,>r3=reg256#1
# asm 2: vpxor 96(<input_0=%rdi),<r3=%ymm6,>r3=%ymm0
vpxor 96(%rdi), %ymm6, %ymm0
# qhasm: mem256[ input_0 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#1,96(<input_0=int64#1)
# asm 2: vmovupd <r3=%ymm0,96(<input_0=%rdi)
vmovupd %ymm0, 96(%rdi)
# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
# asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#1,>r3=reg256#1
# asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm0,>r3=%ymm0
vpxor 96(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#1,96(<input_1=int64#2)
# asm 2: vmovupd <r3=%ymm0,96(<input_1=%rsi)
vmovupd %ymm0, 96(%rsi)
# qhasm: r2 = r2 ^ mem256[ input_0 + 64 ]
# asm 1: vpxor 64(<input_0=int64#1),<r2=reg256#6,>r2=reg256#1
# asm 2: vpxor 64(<input_0=%rdi),<r2=%ymm5,>r2=%ymm0
vpxor 64(%rdi), %ymm5, %ymm0
# qhasm: mem256[ input_0 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_0=int64#1)
# asm 2: vmovupd <r2=%ymm0,64(<input_0=%rdi)
vmovupd %ymm0, 64(%rdi)
# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
# asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#1,>r2=reg256#1
# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm0,>r2=%ymm0
vpxor 64(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#1,64(<input_1=int64#2)
# asm 2: vmovupd <r2=%ymm0,64(<input_1=%rsi)
vmovupd %ymm0, 64(%rsi)
# qhasm: r1 = r1 ^ mem256[ input_0 + 32 ]
# asm 1: vpxor 32(<input_0=int64#1),<r1=reg256#5,>r1=reg256#1
# asm 2: vpxor 32(<input_0=%rdi),<r1=%ymm4,>r1=%ymm0
vpxor 32(%rdi), %ymm4, %ymm0
# qhasm: mem256[ input_0 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_0=int64#1)
# asm 2: vmovupd <r1=%ymm0,32(<input_0=%rdi)
vmovupd %ymm0, 32(%rdi)
# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#1,>r1=reg256#1
# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm0,>r1=%ymm0
vpxor 32(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#1,32(<input_1=int64#2)
# asm 2: vmovupd <r1=%ymm0,32(<input_1=%rsi)
vmovupd %ymm0, 32(%rsi)
# qhasm: r0 = r0 ^ mem256[ input_0 + 0 ]
# asm 1: vpxor 0(<input_0=int64#1),<r0=reg256#4,>r0=reg256#1
# asm 2: vpxor 0(<input_0=%rdi),<r0=%ymm3,>r0=%ymm0
vpxor 0(%rdi), %ymm3, %ymm0
# qhasm: mem256[ input_0 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_0=int64#1)
# asm 2: vmovupd <r0=%ymm0,0(<input_0=%rdi)
vmovupd %ymm0, 0(%rdi)
# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#1,>r0=reg256#1
# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm0,>r0=%ymm0
vpxor 0(%rsi), %ymm0, %ymm0
# qhasm: mem256[ input_1 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#1,0(<input_1=int64#2)
# asm 2: vmovupd <r0=%ymm0,0(<input_1=%rsi)
vmovupd %ymm0, 0(%rsi)
# qhasm: return
add %r11, %rsp
ret
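#
# Added note (hedged, not part of the generated qhasm output above): the
# routine is a bitsliced carry-less multiplication of two 13-limb operands
# (input_2 offsets 0..384 in 32-byte steps). Schoolbook AND/XOR products
# build r0..r24, and the high limbs are folded down with the pattern
# r[k] -> r[k-13+4], r[k-13+3], r[k-13+1], r[k-13], i.e. reduction by
# x^13 = x^4 + x^3 + x + 1. The tail then XORs each result limb into the
# buffer at input_0 and XORs that updated limb into input_1 in turn.
# A minimal C model, shrinking each 256-bit lane to a uint64_t:
#
#   #include <stdint.h>
#
#   void vec_mul_sketch(uint64_t h[13],
#                       const uint64_t f[13], const uint64_t g[13]) {
#       uint64_t r[25] = {0};
#       for (int i = 0; i < 13; i++)          /* schoolbook AND/XOR */
#           for (int j = 0; j < 13; j++)
#               r[i + j] ^= f[i] & g[j];
#       for (int k = 24; k >= 13; k--) {      /* fold: x^13 = x^4+x^3+x+1 */
#           r[k - 13 + 4] ^= r[k];
#           r[k - 13 + 3] ^= r[k];
#           r[k - 13 + 1] ^= r[k];
#           r[k - 13] ^= r[k];
#       }
#       for (int i = 0; i < 13; i++) h[i] ^= r[i];  /* accumulate */
#   }
#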
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/sphincs-shake-128f-simple/aarch64/f1600x2.s
# From https://github.com/bwesterb/armed-keccak
.macro round
# Execute theta, but without xoring into the state yet.
# Compute parities p[i] = a[i] ^ a[5+i] ^ ... ^ a[20+i].
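# (A scalar C sketch of the same round data flow follows this macro.)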
eor3.16b v25, v0, v5, v10
eor3.16b v26, v1, v6, v11
eor3.16b v27, v2, v7, v12
eor3.16b v28, v3, v8, v13
eor3.16b v29, v4, v9, v14
eor3.16b v25, v25, v15, v20
eor3.16b v26, v26, v16, v21
eor3.16b v27, v27, v17, v22
eor3.16b v28, v28, v18, v23
eor3.16b v29, v29, v19, v24
# d[0] = rotl(p[1], 1) ^ p[4]
rax1.2d v30, v29, v26
# d[3] = rotl(p[4], 1) ^ p[2]
rax1.2d v29, v27, v29
# d[1] = rotl(p[2], 1) ^ p[0]
rax1.2d v27, v25, v27
# d[4] = rotl(p[0], 1) ^ p[3]
rax1.2d v25, v28, v25
# d[2] = rotl(p[3], 1) ^ p[1]
rax1.2d v28, v26, v28
# Xor parities from step theta into the state at the same time
# as executing rho and pi.
eor.16b v0, v0, v30
mov.16b v31, v1
xar.2d v1, v6, v27, 20
xar.2d v6, v9, v25, 44
xar.2d v9, v22, v28, 3
xar.2d v22, v14, v25, 25
xar.2d v14, v20, v30, 46
xar.2d v20, v2, v28, 2
xar.2d v2, v12, v28, 21
xar.2d v12, v13, v29, 39
xar.2d v13, v19, v25, 56
xar.2d v19, v23, v29, 8
xar.2d v23, v15, v30, 23
xar.2d v15, v4, v25, 37
xar.2d v4, v24, v25, 50
xar.2d v24, v21, v27, 62
xar.2d v21, v8, v29, 9
xar.2d v8, v16, v27, 19
xar.2d v16, v5, v30, 28
xar.2d v5, v3, v29, 36
xar.2d v3, v18, v29, 43
xar.2d v18, v17, v28, 49
xar.2d v17, v11, v27, 54
xar.2d v11, v7, v28, 58
xar.2d v7, v10, v30, 61
xar.2d v10, v31, v27, 63
# Chi
bcax.16b v25, v0, v2, v1
bcax.16b v26, v1, v3, v2
bcax.16b v2, v2, v4, v3
bcax.16b v3, v3, v0, v4
bcax.16b v4, v4, v1, v0
mov.16b v0, v25
mov.16b v1, v26
bcax.16b v25, v5, v7, v6
bcax.16b v26, v6, v8, v7
bcax.16b v7, v7, v9, v8
bcax.16b v8, v8, v5, v9
bcax.16b v9, v9, v6, v5
mov.16b v5, v25
mov.16b v6, v26
bcax.16b v25, v10, v12, v11
bcax.16b v26, v11, v13, v12
bcax.16b v12, v12, v14, v13
bcax.16b v13, v13, v10, v14
bcax.16b v14, v14, v11, v10
mov.16b v10, v25
mov.16b v11, v26
bcax.16b v25, v15, v17, v16
bcax.16b v26, v16, v18, v17
bcax.16b v17, v17, v19, v18
bcax.16b v18, v18, v15, v19
bcax.16b v19, v19, v16, v15
mov.16b v15, v25
mov.16b v16, v26
bcax.16b v25, v20, v22, v21
bcax.16b v26, v21, v23, v22
bcax.16b v22, v22, v24, v23
bcax.16b v23, v23, v20, v24
bcax.16b v24, v24, v21, v20
mov.16b v20, v25
mov.16b v21, v26
# iota
ld1r {v25.2d}, [x1], #8
eor.16b v0, v0, v25
.endm
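# Added sketch (hedged): one Keccak-f[1600] round as wired above, in scalar
# C pseudo-form. The vector code runs two states at once, one per 64-bit
# half of each 128-bit register; the rho/pi rotation amounts are encoded in
# the xar immediates (xar rotates right, so the left-rotation amount is 64
# minus the listed immediate).
#
#   for (x = 0; x < 5; x++)                      /* theta: parities   */
#       p[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20];
#   for (x = 0; x < 5; x++)
#       d[x] = rotl64(p[(x+1)%5], 1) ^ p[(x+4)%5];
#   /* rho + pi, fused with the theta xor via xar:                    */
#   /*   b[pi(i)] = rotl64(a[i] ^ d[i%5], rho[i])                     */
#   for (y = 0; y < 25; y += 5)                  /* chi, via bcax     */
#       for (x = 0; x < 5; x++)
#           a[y+x] = b[y+x] ^ (~b[y+(x+1)%5] & b[y+(x+2)%5]);
#   a[0] ^= rc[round];                           /* iota              */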
.align 4
.global __f1600x2
__f1600x2:
stp d8, d9, [sp,#-16]!
stp d10, d11, [sp,#-16]!
stp d12, d13, [sp,#-16]!
stp d14, d15, [sp,#-16]!
mov x2, x0
mov x3, #24
ld1.2d {v0, v1, v2, v3}, [x0], #64
ld1.2d {v4, v5, v6, v7}, [x0], #64
ld1.2d {v8, v9, v10, v11}, [x0], #64
ld1.2d {v12, v13, v14, v15}, [x0], #64
ld1.2d {v16, v17, v18, v19}, [x0], #64
ld1.2d {v20, v21, v22, v23}, [x0], #64
ld1.2d {v24}, [x0]
loop:
round
subs x3, x3, #1
cbnz x3, loop
mov x0, x2
st1.2d {v0, v1, v2, v3}, [x0], #64
st1.2d {v4, v5, v6, v7}, [x0], #64
st1.2d {v8, v9, v10, v11}, [x0], #64
st1.2d {v12, v13, v14, v15}, [x0], #64
st1.2d {v16, v17, v18, v19}, [x0], #64
st1.2d {v20, v21, v22, v23}, [x0], #64
st1.2d {v24}, [x0]
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ret lr
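# Added note (hedged): x0 points at two interleaved Keccak states (25 lanes
# x 2, 400 bytes, loaded and stored above) and x1 at the 24 round constants,
# consumed 8 bytes per round by the ld1r in the macro. An illustrative C
# prototype (name and types are assumptions, not taken from a header):
#
#   extern void f1600x2(uint64_t state[50], const uint64_t rc[24]);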
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/avx2/pointwise.S
#include "params.h"
#include "cdecl.h"
.text
.global cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_avx)
.global _cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_avx)
cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_avx):
_cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_avx):
#consts
vmovdqa _8XQINV*4(%rcx),%ymm0
vmovdqa _8XQ*4(%rcx),%ymm1
xor %eax,%eax
_looptop1:
#load
vmovdqa (%rsi),%ymm2
vmovdqa 32(%rsi),%ymm4
vmovdqa 64(%rsi),%ymm6
vmovdqa (%rdx),%ymm10
vmovdqa 32(%rdx),%ymm12
vmovdqa 64(%rdx),%ymm14
vpsrlq $32,%ymm2,%ymm3
vpsrlq $32,%ymm4,%ymm5
vmovshdup %ymm6,%ymm7
vpsrlq $32,%ymm10,%ymm11
vpsrlq $32,%ymm12,%ymm13
vmovshdup %ymm14,%ymm15
#mul
vpmuldq %ymm2,%ymm10,%ymm2
vpmuldq %ymm3,%ymm11,%ymm3
vpmuldq %ymm4,%ymm12,%ymm4
vpmuldq %ymm5,%ymm13,%ymm5
vpmuldq %ymm6,%ymm14,%ymm6
vpmuldq %ymm7,%ymm15,%ymm7
#reduce
vpmuldq %ymm0,%ymm2,%ymm10
vpmuldq %ymm0,%ymm3,%ymm11
vpmuldq %ymm0,%ymm4,%ymm12
vpmuldq %ymm0,%ymm5,%ymm13
vpmuldq %ymm0,%ymm6,%ymm14
vpmuldq %ymm0,%ymm7,%ymm15
vpmuldq %ymm1,%ymm10,%ymm10
vpmuldq %ymm1,%ymm11,%ymm11
vpmuldq %ymm1,%ymm12,%ymm12
vpmuldq %ymm1,%ymm13,%ymm13
vpmuldq %ymm1,%ymm14,%ymm14
vpmuldq %ymm1,%ymm15,%ymm15
vpsubq %ymm10,%ymm2,%ymm2
vpsubq %ymm11,%ymm3,%ymm3
vpsubq %ymm12,%ymm4,%ymm4
vpsubq %ymm13,%ymm5,%ymm5
vpsubq %ymm14,%ymm6,%ymm6
vpsubq %ymm15,%ymm7,%ymm7
vpsrlq $32,%ymm2,%ymm2
vpsrlq $32,%ymm4,%ymm4
vmovshdup %ymm6,%ymm6
#store
vpblendd $0xAA,%ymm3,%ymm2,%ymm2
vpblendd $0xAA,%ymm5,%ymm4,%ymm4
vpblendd $0xAA,%ymm7,%ymm6,%ymm6
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
vmovdqa %ymm6,64(%rdi)
add $96,%rdi
add $96,%rsi
add $96,%rdx
add $1,%eax
cmp $10,%eax
jb _looptop1
vmovdqa (%rsi),%ymm2
vmovdqa 32(%rsi),%ymm4
vmovdqa (%rdx),%ymm10
vmovdqa 32(%rdx),%ymm12
vpsrlq $32,%ymm2,%ymm3
vpsrlq $32,%ymm4,%ymm5
vmovshdup %ymm10,%ymm11
vmovshdup %ymm12,%ymm13
#mul
vpmuldq %ymm2,%ymm10,%ymm2
vpmuldq %ymm3,%ymm11,%ymm3
vpmuldq %ymm4,%ymm12,%ymm4
vpmuldq %ymm5,%ymm13,%ymm5
#reduce
vpmuldq %ymm0,%ymm2,%ymm10
vpmuldq %ymm0,%ymm3,%ymm11
vpmuldq %ymm0,%ymm4,%ymm12
vpmuldq %ymm0,%ymm5,%ymm13
vpmuldq %ymm1,%ymm10,%ymm10
vpmuldq %ymm1,%ymm11,%ymm11
vpmuldq %ymm1,%ymm12,%ymm12
vpmuldq %ymm1,%ymm13,%ymm13
vpsubq %ymm10,%ymm2,%ymm2
vpsubq %ymm11,%ymm3,%ymm3
vpsubq %ymm12,%ymm4,%ymm4
vpsubq %ymm13,%ymm5,%ymm5
vpsrlq $32,%ymm2,%ymm2
vmovshdup %ymm4,%ymm4
#store
vpblendd $0x55,%ymm2,%ymm3,%ymm2
vpblendd $0x55,%ymm4,%ymm5,%ymm4
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
ret
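# Added note (hedged): the #reduce blocks above are lane-wise signed
# Montgomery reductions: multiply the 64-bit product by q^-1 mod 2^32,
# multiply back by q, subtract, and keep the high 32 bits. A scalar C
# equivalent, with Q = 8380417 and QINV = Q^-1 mod 2^32:
#
#   int32_t montmul(int32_t a, int32_t b) {   /* a*b*2^-32 mod Q */
#       int64_t t = (int64_t)a * b;
#       int32_t m = (int32_t)t * QINV;
#       return (int32_t)((t - (int64_t)m * Q) >> 32);
#   }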
.macro pointwise off
#load
vmovdqa \off(%rsi),%ymm6
vmovdqa \off+32(%rsi),%ymm8
vmovdqa \off(%rdx),%ymm10
vmovdqa \off+32(%rdx),%ymm12
vpsrlq $32,%ymm6,%ymm7
vpsrlq $32,%ymm8,%ymm9
vmovshdup %ymm10,%ymm11
vmovshdup %ymm12,%ymm13
#mul
vpmuldq %ymm6,%ymm10,%ymm6
vpmuldq %ymm7,%ymm11,%ymm7
vpmuldq %ymm8,%ymm12,%ymm8
vpmuldq %ymm9,%ymm13,%ymm9
.endm
.macro acc
vpaddq %ymm6,%ymm2,%ymm2
vpaddq %ymm7,%ymm3,%ymm3
vpaddq %ymm8,%ymm4,%ymm4
vpaddq %ymm9,%ymm5,%ymm5
.endm
.global cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_acc_avx)
.global _cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_acc_avx)
cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_acc_avx):
_cdecl(PQCLEAN_MLDSA65_AVX2_pointwise_acc_avx):
#consts
vmovdqa _8XQINV*4(%rcx),%ymm0
vmovdqa _8XQ*4(%rcx),%ymm1
xor %eax,%eax
_looptop2:
pointwise 0
#mov
vmovdqa %ymm6,%ymm2
vmovdqa %ymm7,%ymm3
vmovdqa %ymm8,%ymm4
vmovdqa %ymm9,%ymm5
pointwise 1024
acc
pointwise 2048
acc
pointwise 3072
acc
pointwise 4096
acc
#reduce
vpmuldq %ymm0,%ymm2,%ymm6
vpmuldq %ymm0,%ymm3,%ymm7
vpmuldq %ymm0,%ymm4,%ymm8
vpmuldq %ymm0,%ymm5,%ymm9
vpmuldq %ymm1,%ymm6,%ymm6
vpmuldq %ymm1,%ymm7,%ymm7
vpmuldq %ymm1,%ymm8,%ymm8
vpmuldq %ymm1,%ymm9,%ymm9
vpsubq %ymm6,%ymm2,%ymm2
vpsubq %ymm7,%ymm3,%ymm3
vpsubq %ymm8,%ymm4,%ymm4
vpsubq %ymm9,%ymm5,%ymm5
vpsrlq $32,%ymm2,%ymm2
vmovshdup %ymm4,%ymm4
#store
vpblendd $0xAA,%ymm3,%ymm2,%ymm2
vpblendd $0xAA,%ymm5,%ymm4,%ymm4
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
add $64,%rsi
add $64,%rdx
add $64,%rdi
add $1,%eax
cmp $16,%eax
jb _looptop2
ret
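# Added note (hedged): the five `pointwise` expansions above walk %rsi and
# %rdx in 1024-byte strides (256 coefficients x 4 bytes), so this routine
# accumulates a length-5 inner product of polynomials -- consistent with
# L = 5 for ML-DSA-65 -- and Montgomery-reduces each lane only once, after
# all additions.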
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/avx2/shuffle.S
#include "cdecl.h"
.include "shuffle.inc"
.text
nttunpack128_avx:
#load
vmovdqa (%rdi),%ymm4
vmovdqa 32(%rdi),%ymm5
vmovdqa 64(%rdi),%ymm6
vmovdqa 96(%rdi),%ymm7
vmovdqa 128(%rdi),%ymm8
vmovdqa 160(%rdi),%ymm9
vmovdqa 192(%rdi),%ymm10
vmovdqa 224(%rdi),%ymm11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
#store
vmovdqa %ymm9,(%rdi)
vmovdqa %ymm8,32(%rdi)
vmovdqa %ymm7,64(%rdi)
vmovdqa %ymm6,96(%rdi)
vmovdqa %ymm5,128(%rdi)
vmovdqa %ymm4,160(%rdi)
vmovdqa %ymm3,192(%rdi)
vmovdqa %ymm11,224(%rdi)
ret
.global cdecl(PQCLEAN_MLDSA65_AVX2_nttunpack_avx)
.global _cdecl(PQCLEAN_MLDSA65_AVX2_nttunpack_avx)
cdecl(PQCLEAN_MLDSA65_AVX2_nttunpack_avx):
_cdecl(PQCLEAN_MLDSA65_AVX2_nttunpack_avx):
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/avx2/f1600x4.S
/* Taken from Bas Westerbaan's new 4-way SHAKE implementation
* for Sphincs+ (https://github.com/sphincs/sphincsplus/pull/14/),
* but uses vpshufb for byte-granular rotations as in the Keccak Code Package. */
#include "cdecl.h"
.data
.p2align 5
rho8:
.byte 7,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14,7,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14
rho56:
.byte 1,2,3,4,5,6,7,0,9,10,11,12,13,14,15,8,1,2,3,4,5,6,7,0,9,10,11,12,13,14,15,8
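# Added note (hedged): rotating a 64-bit lane by 8 or 56 bits is a pure
# byte permutation, so the usual shift/or pair (left below as commented-out
# vpsllq/vpsrlq/vpor lines) collapses into a single vpshufb using these
# tables, applied per 128-bit lane:
#   rho8:  dst byte i <- src byte (i+7)%8 of each 8-byte lane = rotl64(x, 8)
#   rho56: dst byte i <- src byte (i+1)%8 of each 8-byte lane = rotl64(x, 56)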
.text
.global cdecl(PQCLEAN_MLDSA65_AVX2_f1600x4)
.global _cdecl(PQCLEAN_MLDSA65_AVX2_f1600x4)
cdecl(PQCLEAN_MLDSA65_AVX2_f1600x4):
_cdecl(PQCLEAN_MLDSA65_AVX2_f1600x4):
vmovdqa rho8(%rip), %ymm0
movq $6, %rax
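# Added note (hedged): each pass through looptop below applies four
# Keccak-f[1600] rounds (round constants read at 0/8/16/24(%rsi)), so the
# counter of 6 set above yields the full 24 rounds.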
looptop:
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 192(%rdi), %ymm4, %ymm9
vpxor 384(%rdi), %ymm3, %ymm10
vpxor 576(%rdi), %ymm2, %ymm11
vpxor 768(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 0(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 96(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 320(%rdi), %ymm5, %ymm10
vpxor 512(%rdi), %ymm4, %ymm11
vpxor 704(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 32(%rdi), %ymm4, %ymm8
vpxor 224(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 608(%rdi), %ymm1, %ymm11
vpxor 640(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 128(%rdi), %ymm1, %ymm8
vpxor 160(%rdi), %ymm5, %ymm9
vpxor 352(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 736(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 64(%rdi), %ymm3, %ymm8
vpxor 256(%rdi), %ymm2, %ymm9
vpxor 448(%rdi), %ymm1, %ymm10
vpxor 480(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 448(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 512(%rdi), %ymm4, %ymm9
vpxor 224(%rdi), %ymm3, %ymm10
vpxor 736(%rdi), %ymm2, %ymm11
vpxor 448(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 8(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 576(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 640(%rdi), %ymm5, %ymm10
vpxor 352(%rdi), %ymm4, %ymm11
vpxor 64(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 192(%rdi), %ymm4, %ymm8
vpxor 704(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 128(%rdi), %ymm1, %ymm11
vpxor 480(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 768(%rdi), %ymm1, %ymm8
vpxor 320(%rdi), %ymm5, %ymm9
vpxor 32(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 256(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 384(%rdi), %ymm3, %ymm8
vpxor 96(%rdi), %ymm2, %ymm9
vpxor 608(%rdi), %ymm1, %ymm10
vpxor 160(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 608(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 352(%rdi), %ymm4, %ymm9
vpxor 704(%rdi), %ymm3, %ymm10
vpxor 256(%rdi), %ymm2, %ymm11
vpxor 608(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 16(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 736(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 480(%rdi), %ymm5, %ymm10
vpxor 32(%rdi), %ymm4, %ymm11
vpxor 384(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 512(%rdi), %ymm4, %ymm8
vpxor 64(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 768(%rdi), %ymm1, %ymm11
vpxor 160(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 448(%rdi), %ymm1, %ymm8
vpxor 640(%rdi), %ymm5, %ymm9
vpxor 192(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 96(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 224(%rdi), %ymm3, %ymm8
vpxor 576(%rdi), %ymm2, %ymm9
vpxor 128(%rdi), %ymm1, %ymm10
vpxor 320(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 128(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 32(%rdi), %ymm4, %ymm9
vpxor 64(%rdi), %ymm3, %ymm10
vpxor 96(%rdi), %ymm2, %ymm11
vpxor 128(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 24(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 256(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 160(%rdi), %ymm5, %ymm10
vpxor 192(%rdi), %ymm4, %ymm11
vpxor 224(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 352(%rdi), %ymm4, %ymm8
vpxor 384(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 448(%rdi), %ymm1, %ymm11
vpxor 320(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
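# Rotation by 8 is whole-byte, so it is done with a single vpshufb
# against the byte-permutation mask kept in ymm0; the commented shift/or
# instructions below show the sequence it replaces.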
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 608(%rdi), %ymm1, %ymm8
vpxor 480(%rdi), %ymm5, %ymm9
vpxor 512(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 576(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
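# Rotation by 56 is likewise byte-aligned and handled with vpshufb using
# the rho56 permutation table; the commented shift/or instructions it
# replaces are kept for reference.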
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 704(%rdi), %ymm3, %ymm8
vpxor 736(%rdi), %ymm2, %ymm9
vpxor 768(%rdi), %ymm1, %ymm10
vpxor 640(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 768(%rdi)
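# End of the unrolled block: advance the round-constant pointer past the
# constants consumed above and loop while the round counter in %rax is
# non-zero.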
addq $32, %rsi
subq $1, %rax
jnz looptop
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
mktmansour/MKT-KSA-Geolocation-Security | 4,490 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/avx2/ntt.S
#include "cdecl.h"
.include "shuffle.inc"
.macro butterfly l,h,zl0=1,zl1=1,zh0=2,zh1=2
vpmuldq %ymm\zl0,%ymm\h,%ymm13
vmovshdup %ymm\h,%ymm12
vpmuldq %ymm\zl1,%ymm12,%ymm14
vpmuldq %ymm\zh0,%ymm\h,%ymm\h
vpmuldq %ymm\zh1,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vmovshdup %ymm\h,%ymm\h
vpblendd $0xAA,%ymm12,%ymm\h,%ymm\h
vpsubd %ymm\h,%ymm\l,%ymm12
vpaddd %ymm\h,%ymm\l,%ymm\l
vmovshdup %ymm13,%ymm13
vpblendd $0xAA,%ymm14,%ymm13,%ymm13
vpaddd %ymm13,%ymm12,%ymm\h
vpsubd %ymm13,%ymm\l,%ymm\l
.endm
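/*
 * High-level semantics of the macro above (a sketch; montymul names the
 * signed Montgomery product with q = 8380417 and is not a symbol in this
 * file):
 *   t = montymul(zeta, h);
 *   h = l - t;
 *   l = l + t;
 * Even and odd 32-bit lanes are processed separately: vpmuldq forms the
 * 64-bit products, vmovshdup exposes the odd lanes, and vpblendd merges
 * the two halves back together.
 */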
.macro levels0t1 off
/* level 0 */
vpbroadcastd (_ZETAS_QINV+1)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+1)*4(%rsi),%ymm2
vmovdqa 0+32*\off(%rdi),%ymm4
vmovdqa 128+32*\off(%rdi),%ymm5
vmovdqa 256+32*\off(%rdi),%ymm6
vmovdqa 384+32*\off(%rdi),%ymm7
vmovdqa 512+32*\off(%rdi),%ymm8
vmovdqa 640+32*\off(%rdi),%ymm9
vmovdqa 768+32*\off(%rdi),%ymm10
vmovdqa 896+32*\off(%rdi),%ymm11
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
/* level 1 */
vpbroadcastd (_ZETAS_QINV+2)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+2)*4(%rsi),%ymm2
butterfly 4,6
butterfly 5,7
vpbroadcastd (_ZETAS_QINV+3)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+3)*4(%rsi),%ymm2
butterfly 8,10
butterfly 9,11
vmovdqa %ymm4, 0+32*\off(%rdi)
vmovdqa %ymm5,128+32*\off(%rdi)
vmovdqa %ymm6,256+32*\off(%rdi)
vmovdqa %ymm7,384+32*\off(%rdi)
vmovdqa %ymm8,512+32*\off(%rdi)
vmovdqa %ymm9,640+32*\off(%rdi)
vmovdqa %ymm10,768+32*\off(%rdi)
vmovdqa %ymm11,896+32*\off(%rdi)
.endm
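/*
 * levels0t1 runs the two outermost NTT levels on eight 32-byte blocks at
 * a 128-byte stride; levels2t7 below finishes the remaining six levels
 * inside each 256-byte quarter, using shuffle8/shuffle4/shuffle2 to
 * transpose as the butterfly distance shrinks below one vector.
 */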
.macro levels2t7 off
/* level 2 */
vmovdqa 256*\off+ 0(%rdi),%ymm4
vmovdqa 256*\off+ 32(%rdi),%ymm5
vmovdqa 256*\off+ 64(%rdi),%ymm6
vmovdqa 256*\off+ 96(%rdi),%ymm7
vmovdqa 256*\off+128(%rdi),%ymm8
vmovdqa 256*\off+160(%rdi),%ymm9
vmovdqa 256*\off+192(%rdi),%ymm10
vmovdqa 256*\off+224(%rdi),%ymm11
vpbroadcastd (_ZETAS_QINV+4+\off)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+4+\off)*4(%rsi),%ymm2
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
/* level 3 */
vmovdqa (_ZETAS_QINV+8+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+8+8*\off)*4(%rsi),%ymm2
butterfly 3,5
butterfly 8,10
butterfly 4,6
butterfly 9,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
/* level 4 */
vmovdqa (_ZETAS_QINV+40+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+40+8*\off)*4(%rsi),%ymm2
butterfly 7,8
butterfly 5,6
butterfly 3,4
butterfly 10,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
/* level 5 */
vmovdqa (_ZETAS_QINV+72+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+72+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,5,1,10,2,15
butterfly 8,4,1,10,2,15
butterfly 7,3,1,10,2,15
butterfly 6,11,1,10,2,15
/* level 6 */
vmovdqa (_ZETAS_QINV+104+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+104+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,7,1,10,2,15
butterfly 8,6,1,10,2,15
vmovdqa (_ZETAS_QINV+104+8*\off+32)*4(%rsi),%ymm1
vmovdqa (_ZETAS+104+8*\off+32)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 5,3,1,10,2,15
butterfly 4,11,1,10,2,15
/* level 7 */
vmovdqa (_ZETAS_QINV+168+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,8,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+32)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+32)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 7,6,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+64)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+64)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 5,4,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+96)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+96)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 3,11,1,10,2,15
vmovdqa %ymm9,256*\off+ 0(%rdi)
vmovdqa %ymm8,256*\off+ 32(%rdi)
vmovdqa %ymm7,256*\off+ 64(%rdi)
vmovdqa %ymm6,256*\off+ 96(%rdi)
vmovdqa %ymm5,256*\off+128(%rdi)
vmovdqa %ymm4,256*\off+160(%rdi)
vmovdqa %ymm3,256*\off+192(%rdi)
vmovdqa %ymm11,256*\off+224(%rdi)
.endm
.text
.global cdecl(PQCLEAN_MLDSA65_AVX2_ntt_avx)
.global _cdecl(PQCLEAN_MLDSA65_AVX2_ntt_avx)
cdecl(PQCLEAN_MLDSA65_AVX2_ntt_avx):
_cdecl(PQCLEAN_MLDSA65_AVX2_ntt_avx):
vmovdqa _8XQ*4(%rsi),%ymm0
levels0t1 0
levels0t1 1
levels0t1 2
levels0t1 3
levels2t7 0
levels2t7 1
levels2t7 2
levels2t7 3
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
mktmansour/MKT-KSA-Geolocation-Security | 5,851 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/avx2/invntt.S
#include "cdecl.h"
.include "shuffle.inc"
.macro butterfly l,h,zl0=1,zl1=1,zh0=2,zh1=2
vpsubd %ymm\l,%ymm\h,%ymm12
vpaddd %ymm\h,%ymm\l,%ymm\l
vpmuldq %ymm\zl0,%ymm12,%ymm13
vmovshdup %ymm12,%ymm\h
vpmuldq %ymm\zl1,%ymm\h,%ymm14
vpmuldq %ymm\zh0,%ymm12,%ymm12
vpmuldq %ymm\zh1,%ymm\h,%ymm\h
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpsubd %ymm13,%ymm12,%ymm12
vpsubd %ymm14,%ymm\h,%ymm\h
vmovshdup %ymm12,%ymm12
vpblendd $0xAA,%ymm\h,%ymm12,%ymm\h
.endm
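/*
 * Gentleman-Sande (inverse) variant of the butterfly, as a sketch with
 * montymul as in the forward transform:
 *   t = h - l;
 *   l = l + h;
 *   h = montymul(zeta, t);
 */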
.macro levels0t5 off
vmovdqa 256*\off+ 0(%rdi),%ymm4
vmovdqa 256*\off+ 32(%rdi),%ymm5
vmovdqa 256*\off+ 64(%rdi),%ymm6
vmovdqa 256*\off+ 96(%rdi),%ymm7
vmovdqa 256*\off+128(%rdi),%ymm8
vmovdqa 256*\off+160(%rdi),%ymm9
vmovdqa 256*\off+192(%rdi),%ymm10
vmovdqa 256*\off+224(%rdi),%ymm11
/* level 0 */
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,5,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-40)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-40)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 6,7,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-72)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-72)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 8,9,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-104)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-104)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 10,11,1,3,2,15
/* level 1 */
vpermq $0x1B,(_ZETAS_QINV+168-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+168-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,6,1,3,2,15
butterfly 5,7,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+168-8*\off-40)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+168-8*\off-40)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 8,10,1,3,2,15
butterfly 9,11,1,3,2,15
/* level 2 */
vpermq $0x1B,(_ZETAS_QINV+104-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+104-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,8,1,3,2,15
butterfly 5,9,1,3,2,15
butterfly 6,10,1,3,2,15
butterfly 7,11,1,3,2,15
/* level 3 */
shuffle2 4,5,3,5
shuffle2 6,7,4,7
shuffle2 8,9,6,9
shuffle2 10,11,8,11
vpermq $0x1B,(_ZETAS_QINV+72-8*\off-8)*4(%rsi),%ymm1
vpermq $0x1B,(_ZETAS+72-8*\off-8)*4(%rsi),%ymm2
butterfly 3,5
butterfly 4,7
butterfly 6,9
butterfly 8,11
/* level 4 */
shuffle4 3,4,10,4
shuffle4 6,8,3,8
shuffle4 5,7,6,7
shuffle4 9,11,5,11
vpermq $0x1B,(_ZETAS_QINV+40-8*\off-8)*4(%rsi),%ymm1
vpermq $0x1B,(_ZETAS+40-8*\off-8)*4(%rsi),%ymm2
butterfly 10,4
butterfly 3,8
butterfly 6,7
butterfly 5,11
/* level 5 */
shuffle8 10,3,9,3
shuffle8 6,5,10,5
shuffle8 4,8,6,8
shuffle8 7,11,4,11
vpbroadcastd (_ZETAS_QINV+7-\off)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+7-\off)*4(%rsi),%ymm2
butterfly 9,3
butterfly 10,5
butterfly 6,8
butterfly 4,11
vmovdqa %ymm9,256*\off+ 0(%rdi)
vmovdqa %ymm10,256*\off+ 32(%rdi)
vmovdqa %ymm6,256*\off+ 64(%rdi)
vmovdqa %ymm4,256*\off+ 96(%rdi)
vmovdqa %ymm3,256*\off+128(%rdi)
vmovdqa %ymm5,256*\off+160(%rdi)
vmovdqa %ymm8,256*\off+192(%rdi)
vmovdqa %ymm11,256*\off+224(%rdi)
.endm
.macro levels6t7 off
vmovdqa 0+32*\off(%rdi),%ymm4
vmovdqa 128+32*\off(%rdi),%ymm5
vmovdqa 256+32*\off(%rdi),%ymm6
vmovdqa 384+32*\off(%rdi),%ymm7
vmovdqa 512+32*\off(%rdi),%ymm8
vmovdqa 640+32*\off(%rdi),%ymm9
vmovdqa 768+32*\off(%rdi),%ymm10
vmovdqa 896+32*\off(%rdi),%ymm11
/* level 6 */
vpbroadcastd (_ZETAS_QINV+3)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+3)*4(%rsi),%ymm2
butterfly 4,6
butterfly 5,7
vpbroadcastd (_ZETAS_QINV+2)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+2)*4(%rsi),%ymm2
butterfly 8,10
butterfly 9,11
/* level 7 */
vpbroadcastd (_ZETAS_QINV+0)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+0)*4(%rsi),%ymm2
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
vmovdqa %ymm8,512+32*\off(%rdi)
vmovdqa %ymm9,640+32*\off(%rdi)
vmovdqa %ymm10,768+32*\off(%rdi)
vmovdqa %ymm11,896+32*\off(%rdi)
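/*
 * Final scaling: the remaining four row registers are Montgomery-
 * multiplied by the 8XDIV constants, which appear to fold the 1/256
 * inverse-NTT factor together with the Montgomery conversion.
 */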
vmovdqa (_8XDIV_QINV)*4(%rsi),%ymm1
vmovdqa (_8XDIV)*4(%rsi),%ymm2
vpmuldq %ymm1,%ymm4,%ymm12
vpmuldq %ymm1,%ymm5,%ymm13
vmovshdup %ymm4,%ymm8
vmovshdup %ymm5,%ymm9
vpmuldq %ymm1,%ymm8,%ymm14
vpmuldq %ymm1,%ymm9,%ymm15
vpmuldq %ymm2,%ymm4,%ymm4
vpmuldq %ymm2,%ymm5,%ymm5
vpmuldq %ymm2,%ymm8,%ymm8
vpmuldq %ymm2,%ymm9,%ymm9
vpmuldq %ymm0,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpmuldq %ymm0,%ymm15,%ymm15
vpsubd %ymm12,%ymm4,%ymm4
vpsubd %ymm13,%ymm5,%ymm5
vpsubd %ymm14,%ymm8,%ymm8
vpsubd %ymm15,%ymm9,%ymm9
vmovshdup %ymm4,%ymm4
vmovshdup %ymm5,%ymm5
vpblendd $0xAA,%ymm8,%ymm4,%ymm4
vpblendd $0xAA,%ymm9,%ymm5,%ymm5
vpmuldq %ymm1,%ymm6,%ymm12
vpmuldq %ymm1,%ymm7,%ymm13
vmovshdup %ymm6,%ymm8
vmovshdup %ymm7,%ymm9
vpmuldq %ymm1,%ymm8,%ymm14
vpmuldq %ymm1,%ymm9,%ymm15
vpmuldq %ymm2,%ymm6,%ymm6
vpmuldq %ymm2,%ymm7,%ymm7
vpmuldq %ymm2,%ymm8,%ymm8
vpmuldq %ymm2,%ymm9,%ymm9
vpmuldq %ymm0,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpmuldq %ymm0,%ymm15,%ymm15
vpsubd %ymm12,%ymm6,%ymm6
vpsubd %ymm13,%ymm7,%ymm7
vpsubd %ymm14,%ymm8,%ymm8
vpsubd %ymm15,%ymm9,%ymm9
vmovshdup %ymm6,%ymm6
vmovshdup %ymm7,%ymm7
vpblendd $0xAA,%ymm8,%ymm6,%ymm6
vpblendd $0xAA,%ymm9,%ymm7,%ymm7
vmovdqa %ymm4, 0+32*\off(%rdi)
vmovdqa %ymm5,128+32*\off(%rdi)
vmovdqa %ymm6,256+32*\off(%rdi)
vmovdqa %ymm7,384+32*\off(%rdi)
.endm
.text
.global cdecl(PQCLEAN_MLDSA65_AVX2_invntt_avx)
.global _cdecl(PQCLEAN_MLDSA65_AVX2_invntt_avx)
cdecl(PQCLEAN_MLDSA65_AVX2_invntt_avx):
_cdecl(PQCLEAN_MLDSA65_AVX2_invntt_avx):
vmovdqa _8XQ*4(%rsi),%ymm0
levels0t5 0
levels0t5 1
levels0t5 2
levels0t5 3
levels6t7 0
levels6t7 1
levels6t7 2
levels6t7 3
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
mktmansour/MKT-KSA-Geolocation-Security | 19,073 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/aarch64/__asm_iNTT.S
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_top
.global _PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_top
PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_top:
_PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_top:
push_all
Q .req w20
Qhalf .req w21
nQhalf .req w22
invNR2ph .req w24
invNR2dp .req w25
invNWR2ph .req w26
invNWR2dp .req w27
src .req x0
counter .req x19
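/*
 * x2 appears to point at a small constant table (q at offset 0, the
 * scaled inverse-N factors invNR2ph/dp and invNWR2ph/dp at offsets
 * 16-28), while x1 holds the twiddle vectors loaded into v20-v27.
 */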
ldr Q, [x2, #0]
lsr Qhalf, Q, #1
neg nQhalf, Qhalf
ldr invNR2ph, [x2, #16]
ldr invNR2dp, [x2, #20]
ldr invNWR2ph, [x2, #24]
ldr invNWR2dp, [x2, #28]
ldr q20, [x1, #0*16]
ldr q21, [x1, #1*16]
ldr q22, [x1, #2*16]
ldr q23, [x1, #3*16]
ldr q24, [x1, #4*16]
ldr q25, [x1, #5*16]
ldr q26, [x1, #6*16]
ldr q27, [x1, #7*16]
mov v20.S[0], Q
ldr q0, [src, # 0*64]
ldr q1, [src, # 1*64]
ldr q2, [src, # 2*64]
ldr q3, [src, # 3*64]
ldr q4, [src, # 4*64]
ldr q5, [src, # 5*64]
ldr q6, [src, # 6*64]
ldr q7, [src, # 7*64]
qq_butterfly_botll \
v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, \
src, \
q8, q9, q10, q11, \
#8*64, #9*64, #10*64, #11*64, \
src, \
q12, q13, q14, q15, \
#12*64, #13*64, #14*64, #15*64
qq_butterfly_mix_rev v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v20, v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mix_rev v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v20, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3
qq_butterfly_mix_rev v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v20, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3
qq_butterfly_mix_rev v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v20, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1
qq_butterfly_mix_rev v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_butterfly_top v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_sub_add v16, v17, v18, v19, v28, v29, v30, v31, v0, v2, v4, v6, v8, v10, v12, v14
qq_sub_add v0, v2, v4, v6, v8, v10, v12, v14, v1, v3, v5, v7, v9, v11, v13, v15
mov v20.S[2], invNR2ph
mov v20.S[3], invNR2dp
qq_montgomery_mul v1, v3, v5, v7, v0, v2, v4, v6, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v0, v2, v4, v6, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov v20.S[2], invNWR2ph
mov v20.S[3], invNWR2dp
qq_montgomery_mul v9, v11, v13, v15, v8, v10, v12, v14, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v8, v10, v12, v14, v28, v29, v30, v31, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov counter, #3
_intt_top_loop:
dup v29.4S, Q
dup v30.4S, Qhalf
dup v31.4S, nQhalf
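/*
 * Conditional +/-q normalisation: the paired cmge comparisons build a
 * mask that is -1 when a >= q/2 and +1 when a <= -q/2 (0 otherwise), so
 * the mla folds "subtract q" and "add q" into one multiply-accumulate.
 */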
cmge v18.4S, v31.4S, v0.4S
cmge v19.4S, v31.4S, v1.4S
cmge v16.4S, v0.4S, v30.4S
cmge v17.4S, v1.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v0.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v2.4S
mla v1.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v3.4S
str q0, [src, #0*64]
cmge v16.4S, v2.4S, v30.4S
ldr q0, [src, #(16 + 0*64)]
str q1, [src, #1*64]
cmge v17.4S, v3.4S, v30.4S
ldr q1, [src, #(16 + 1*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v2.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v4.4S
mla v3.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v5.4S
str q2, [src, #2*64]
cmge v16.4S, v4.4S, v30.4S
ldr q2, [src, #(16 + 2*64)]
str q3, [src, #3*64]
cmge v17.4S, v5.4S, v30.4S
ldr q3, [src, #(16 + 3*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v4.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v6.4S
mla v5.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v7.4S
str q4, [src, #4*64]
cmge v16.4S, v6.4S, v30.4S
ldr q4, [src, #(16 + 4*64)]
str q5, [src, #5*64]
cmge v17.4S, v7.4S, v30.4S
ldr q5, [src, #(16 + 5*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v6.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v8.4S
mla v7.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v9.4S
str q6, [src, #6*64]
cmge v16.4S, v8.4S, v30.4S
ldr q6, [src, #(16 + 6*64)]
str q7, [src, #7*64]
cmge v17.4S, v9.4S, v30.4S
ldr q7, [src, #(16 + 7*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v8.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v10.4S
mla v9.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v11.4S
str q8, [src, #8*64]
cmge v16.4S, v10.4S, v30.4S
str q9, [src, #9*64]
cmge v17.4S, v11.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v10.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v12.4S
mla v11.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v13.4S
str q10, [src, #10*64]
cmge v16.4S, v12.4S, v30.4S
str q11, [src, #11*64]
cmge v17.4S, v13.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v12.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v14.4S
mla v13.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v15.4S
str q12, [src, #12*64]
cmge v16.4S, v14.4S, v30.4S
str q13, [src, #13*64]
cmge v17.4S, v15.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v14.4S, v16.4S, v29.4S
mla v15.4S, v17.4S, v29.4S
str q14, [src, #14*64]
str q15, [src, #15*64]
add src, src, #16
qq_butterfly_botll \
v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, \
src, \
q8, q9, q10, q11, \
#8*64, #9*64, #10*64, #11*64, \
src, \
q12, q13, q14, q15, \
#12*64, #13*64, #14*64, #15*64
qq_butterfly_mix_rev v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v20, v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mix_rev v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v20, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3
qq_butterfly_mix_rev v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v20, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3
qq_butterfly_mix_rev v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v20, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1
qq_butterfly_mix_rev v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_butterfly_top v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_sub_add v16, v17, v18, v19, v28, v29, v30, v31, v0, v2, v4, v6, v8, v10, v12, v14
qq_sub_add v0, v2, v4, v6, v8, v10, v12, v14, v1, v3, v5, v7, v9, v11, v13, v15
mov v20.S[2], invNR2ph
mov v20.S[3], invNR2dp
qq_montgomery_mul v1, v3, v5, v7, v0, v2, v4, v6, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v0, v2, v4, v6, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov v20.S[2], invNWR2ph
mov v20.S[3], invNWR2dp
qq_montgomery_mul v9, v11, v13, v15, v8, v10, v12, v14, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v8, v10, v12, v14, v28, v29, v30, v31, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
sub counter, counter, #1
cbnz counter, _intt_top_loop
dup v29.4S, Q
dup v30.4S, Qhalf
dup v31.4S, nQhalf
cmge v18.4S, v31.4S, v0.4S
cmge v19.4S, v31.4S, v1.4S
cmge v16.4S, v0.4S, v30.4S
cmge v17.4S, v1.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v0.4S, v16.4S, v29.4S
mla v1.4S, v17.4S, v29.4S
str q0, [src, #0*64]
str q1, [src, #1*64]
cmge v18.4S, v31.4S, v2.4S
cmge v19.4S, v31.4S, v3.4S
cmge v16.4S, v2.4S, v30.4S
cmge v17.4S, v3.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v2.4S, v16.4S, v29.4S
mla v3.4S, v17.4S, v29.4S
str q2, [src, #2*64]
str q3, [src, #3*64]
cmge v18.4S, v31.4S, v4.4S
cmge v19.4S, v31.4S, v5.4S
cmge v16.4S, v4.4S, v30.4S
cmge v17.4S, v5.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v4.4S, v16.4S, v29.4S
mla v5.4S, v17.4S, v29.4S
str q4, [src, #4*64]
str q5, [src, #5*64]
cmge v18.4S, v31.4S, v6.4S
cmge v19.4S, v31.4S, v7.4S
cmge v16.4S, v6.4S, v30.4S
cmge v17.4S, v7.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v6.4S, v16.4S, v29.4S
mla v7.4S, v17.4S, v29.4S
str q6, [src, #6*64]
str q7, [src, #7*64]
cmge v18.4S, v31.4S, v8.4S
cmge v19.4S, v31.4S, v9.4S
cmge v16.4S, v8.4S, v30.4S
cmge v17.4S, v9.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v8.4S, v16.4S, v29.4S
mla v9.4S, v17.4S, v29.4S
str q8, [src, #8*64]
str q9, [src, #9*64]
cmge v18.4S, v31.4S, v10.4S
cmge v19.4S, v31.4S, v11.4S
cmge v16.4S, v10.4S, v30.4S
cmge v17.4S, v11.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v10.4S, v16.4S, v29.4S
mla v11.4S, v17.4S, v29.4S
str q10, [src, #10*64]
str q11, [src, #11*64]
cmge v18.4S, v31.4S, v12.4S
cmge v19.4S, v31.4S, v13.4S
cmge v16.4S, v12.4S, v30.4S
cmge v17.4S, v13.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v12.4S, v16.4S, v29.4S
mla v13.4S, v17.4S, v29.4S
str q12, [src, #12*64]
str q13, [src, #13*64]
cmge v18.4S, v31.4S, v14.4S
cmge v19.4S, v31.4S, v15.4S
cmge v16.4S, v14.4S, v30.4S
cmge v17.4S, v15.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v14.4S, v16.4S, v29.4S
mla v15.4S, v17.4S, v29.4S
str q14, [src, #14*64]
str q15, [src, #15*64]
add src, src, #16
.unreq Q
.unreq Qhalf
.unreq nQhalf
.unreq invNR2ph
.unreq invNR2dp
.unreq invNWR2ph
.unreq invNWR2dp
.unreq src
.unreq counter
pop_all
ret
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_bot
.global _PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_bot
PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_bot:
_PQCLEAN_MLDSA65_AARCH64__asm_intt_SIMD_bot:
push_all
Q .req w20
RphRdp .req x21
src0 .req x0
src1 .req x2
table0 .req x28
table1 .req x27
counter .req x19
ldr Q, [x2]
ldr RphRdp, [x2, #8]
add table0, x1, #128
add table1, table0, #1024
add src1, src0, #512
ldr q8, [table0, #4*16]
ldr q9, [table0, #5*16]
ldr q10, [table0, #6*16]
ldr q11, [table0, #7*16]
ldr q24, [table1, #4*16]
ldr q25, [table1, #5*16]
ldr q26, [table1, #6*16]
ldr q27, [table1, #7*16]
ldr q0, [src0, # 0*16]
ldr q1, [src0, # 1*16]
ldr q16, [src1, # 0*16]
ldr q17, [src1, # 1*16]
ldr q2, [src0, # 2*16]
ldr q3, [src0, # 3*16]
ldr q18, [src1, # 2*16]
ldr q19, [src1, # 3*16]
trn_4x4_l4 \
v0, v1, v2, v3, v12, v13, v14, v15, \
table0, \
q4, q5, q6, q7, \
#0*16, #1*16, #2*16, #3*16
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q20, q21, q22, q23, \
#0*16, #1*16, #2*16, #3*16
mov v4.S[0], Q
mov v20.D[0], RphRdp
dq_butterfly_vec_bot v0, v2, v12, v13, v1, v3, v4, v8, v9, v10, v11
dq_butterfly_vec_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v8, v9, v10, v11, v24, v25, v26, v27
dq_butterfly_vec_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v24, v25, v26, v27, v6, v7, v6, v7
dq_butterfly_vec_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v6, v7, v6, v7, v22, v23, v22, v23
mov counter, #7
_intt_bot_loop:
dq_butterfly_vec_top_ltrn_4x4 \
v28, v29, v18, v19, v4, v22, v23, v22, v23, \
table0, \
q8, q9, q10, q11, \
#(128+4*16), #(128+5*16), #(128+6*16), #(128+7*16), \
v0, v1, v2, v3, v12, v13, v14, v15
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q24, q25, q26, q27, \
#(128+4*16), #(128+5*16), #(128+6*16), #(128+7*16)
dq_butterfly_bot v0, v2, v12, v13, v1, v3, v4, v5, 0, 1, v5, 2, 3
dq_butterfly_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3
dq_butterfly_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v21, 0, 1, v21, 2, 3, v4, 2, 3, v4, 2, 3
dq_butterfly_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_top v16, v17, v28, v29, v18, v19, v4, v20, 2, 3, v20, 2, 3
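/* Stores are interleaved with a reduce32 step: srshr #23 forms the
   rounded quotient and mls subtracts quotient*q (q kept in v4.S[0]). */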
str q2, [src0, # 2*16]
srshr v14.4S, v0.4S, #23
ldr q2, [src0, #(64+ 2*16)]
str q3, [src0, # 3*16]
srshr v15.4S, v1.4S, #23
ldr q3, [src0, #(64+ 3*16)]
str q18, [src1, # 2*16]
srshr v30.4S, v16.4S, #23
ldr q18, [src1, #(64+ 2*16)]
str q19, [src1, # 3*16]
srshr v31.4S, v17.4S, #23
ldr q19, [src1, #(64+ 3*16)]
mls v0.4S, v14.4S, v4.S[0]
str q0, [src0, # 0*16]
ldr q0, [src0, #(64+ 0*16)]
mls v1.4S, v15.4S, v4.S[0]
str q1, [src0, # 1*16]
ldr q1, [src0, #(64+ 1*16)]
mls v16.4S, v30.4S, v4.S[0]
str q16, [src1, # 0*16]
ldr q16, [src1, #(64+ 0*16)]
mls v17.4S, v31.4S, v4.S[0]
str q17, [src1, # 1*16]
ldr q17, [src1, #(64+ 1*16)]
add table0, table0, #128
add table1, table1, #128
add src0, src0, #64
add src1, src1, #64
trn_4x4_l4 \
v0, v1, v2, v3, v12, v13, v14, v15, \
table0, \
q4, q5, q6, q7, \
#0*16, #1*16, #2*16, #3*16
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q20, q21, q22, q23, \
#0*16, #1*16, #2*16, #3*16
mov v4.S[0], Q
mov v20.D[0], RphRdp
dq_butterfly_vec_bot v0, v2, v12, v13, v1, v3, v4, v8, v9, v10, v11
dq_butterfly_vec_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v8, v9, v10, v11, v24, v25, v26, v27
dq_butterfly_vec_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v24, v25, v26, v27, v6, v7, v6, v7
dq_butterfly_vec_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v6, v7, v6, v7, v22, v23, v22, v23
sub counter, counter, #1
cbnz counter, _intt_bot_loop
dq_butterfly_vec_top_trn_4x4 \
v16, v17, v28, v29, v18, v19, v4, v22, v23, v22, v23, \
v0, v1, v2, v3, v12, v13, v14, v15
trn_4x4 v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_bot v0, v2, v12, v13, v1, v3, v4, v5, 0, 1, v5, 2, 3
dq_butterfly_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3
dq_butterfly_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v21, 0, 1, v21, 2, 3, v4, 2, 3, v4, 2, 3
dq_butterfly_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_top v16, v17, v28, v29, v18, v19, v4, v20, 2, 3, v20, 2, 3
str q2, [src0, # 2*16]
str q3, [src0, # 3*16]
str q18, [src1, # 2*16]
str q19, [src1, # 3*16]
srshr v14.4S, v0.4S, #23
srshr v15.4S, v1.4S, #23
srshr v30.4S, v16.4S, #23
srshr v31.4S, v17.4S, #23
mls v0.4S, v14.4S, v4.S[0]
mls v1.4S, v15.4S, v4.S[0]
mls v16.4S, v30.4S, v4.S[0]
mls v17.4S, v31.4S, v4.S[0]
str q0, [src0, # 0*16]
str q1, [src0, # 1*16]
str q16, [src1, # 0*16]
str q17, [src1, # 1*16]
add table0, table0, #128
add table1, table1, #128
add src0, src0, #64
add src1, src1, #64
.unreq Q
.unreq RphRdp
.unreq src0
.unreq src1
.unreq table0
.unreq table1
.unreq counter
pop_all
ret
mktmansour/MKT-KSA-Geolocation-Security | 17,281 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/aarch64/__asm_NTT.S
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
#include "params.h"
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_top
.global _PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_top
PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_top:
_PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_top:
push_simd
Q .req w8
src .req x0
counter .req x11
ld1 {v20.4S, v21.4S, v22.4S, v23.4S}, [x1], #64
ld1 {v24.4S, v25.4S, v26.4S, v27.4S}, [x1], #64
ldr Q, [x2]
mov v20.S[0], Q
ldr q9, [src, #9*64]
ldr q11, [src, #11*64]
ldr q13, [src, #13*64]
ldr q15, [src, #15*64]
qq_butterfly_topl \
v9, v11, v13, v15, v16, v17, v18, v19, v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64
qq_butterfly_mixll \
v1, v3, v5, v7, v9, v11, v13, v15, v16, v17, v18, v19, \
v8, v10, v12, v14, v28, v29, v30, v31, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64
qq_butterfly_mix v0, v2, v4, v6, v8, v10, v12, v14, v28, v29, v30, v31, v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mixssl \
v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, \
v1, v3, v5, v7, v28, v29, v30, v31, \
v20, \
v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, \
src, \
q9, q11, q13, q15, \
#9*64, #11*64, #13*64, #15*64, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q9, q11, q13, q15, \
#(16+9*64), #(16+11*64), #(16+13*64), #(16+15*64)
mov counter, #3
_ntt_top_loop:
qq_butterfly_mixssl \
v0, v2, v4, v6, v1, v3, v5, v7, v28, v29, v30, v31, \
v9, v11, v13, v15, v16, v17, v18, v19, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64, \
src, \
q1, q3, q5, q7, \
#(16+1*64), #(16+3*64), #(16+5*64), #(16+7*64)
qq_butterfly_mixll \
v1, v3, v5, v7, v9, v11, v13, v15, v16, v17, v18, v19, \
v8, v10, v12, v14, v28, v29, v30, v31, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q8, q10, q12, q14, \
#(16+8*64), #(16+10*64), #(16+12*64), #(16+14*64), \
src, \
q0, q2, q4, q6, \
#(16+0*64), #(16+2*64), #(16+4*64), #(16+6*64)
add src, src, #16
qq_butterfly_mix v0, v2, v4, v6, v8, v10, v12, v14, v28, v29, v30, v31, v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mixssl \
v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, \
v1, v3, v5, v7, v28, v29, v30, v31, \
v20, \
v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, \
src, \
q9, q11, q13, q15, \
#9*64, #11*64, #13*64, #15*64, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q9, q11, q13, q15, \
#(16+9*64), #(16+11*64), #(16+13*64), #(16+15*64)
sub counter, counter, #1
cbnz counter, _ntt_top_loop
qq_butterfly_botss \
v0, v2, v4, v6, v1, v3, v5, v7, v28, v29, v30, v31, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64
.unreq Q
.unreq src
.unreq counter
pop_simd
ret
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_bot
.global _PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_bot
PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_bot:
_PQCLEAN_MLDSA65_AARCH64__asm_ntt_SIMD_bot:
push_simd
Q .req w8
src .req x0
table0 .req x9
table1 .req x10
counter .req x11
ldr Q, [x2]
add table0, x1, #128
add table1, table0, #1024
ldr q0, [src, #0*16]
ldr q1, [src, #1*16]
ldr q2, [src, #2*16]
ldr q3, [src, #3*16]
ldr q4, [table0, #0*16]
ldr q5, [table0, #1*16]
ldr q20, [table1, #0*16]
ldr q21, [table1, #1*16]
dq_butterfly_topl4 \
v0, v1, v2, v3, v12, v13, v4, v4, 2, 3, v4, 2, 3, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
dq_butterfly_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v16, v18, v17, v19, v28, v29, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v0, v1, v2, v3, v12, v13, v14, v15
dq_butterfly_vec_top_trn_4x4 \
v0, v1, v2, v3, v12, v13, v4, v6, v7, v6, v7, \
v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_vec_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, v4, v8, v9, v10, v11, v24, v25, v26, v27
trn_4x4_l4 v0, v1, v2, v3, v8, v9, v10, v11, src, q12, q13, q14, q15, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q0, [src, #0*16]
str q2, [src, #2*16]
dq_butterfly_vec_bot v16, v18, v17, v19, v28, v29, v4, v24, v25, v26, v27
str q1, [src, #1*16]
str q3, [src, #3*16]
add src, src, #64
trn_4x4_l4 v16, v17, v18, v19, v24, v25, v26, v27, src, q28, q29, q30, q31, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v12, v13, v14, v15, v0, v1, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v28, v30, v29, v31, v16, v17, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
dq_butterfly_vec_top_trn_4x4 \
v12, v13, v14, v15, v0, v1, v4, v6, v7, v6, v7, \
v28, v29, v30, v31, v16, v17, v18, v19
dq_butterfly_vec_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, v4, v8, v9, v10, v11, v24, v25, v26, v27
mov counter, #3
_ntt_bot_loop:
trn_4x4_l4 v12, v13, v14, v15, v8, v9, v10, v11, src, q0, q1, q2, q3, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q12, [src, #0*16]
str q13, [src, #1*16]
dq_butterfly_vec_bot v28, v30, v29, v31, v16, v17, v4, v24, v25, v26, v27
str q14, [src, #2*16]
str q15, [src, #3*16]
add src, src, #64
trn_4x4_l4 v28, v29, v30, v31, v24, v25, v26, v27, src, q16, q17, q18, q19, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v0, v1, v2, v3, v12, v13, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q28, q29, q30, q31, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v16, v18, v17, v19, v28, v29, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v0, v1, v2, v3, v12, v13, v14, v15
dq_butterfly_vec_top_trn_4x4 \
v0, v1, v2, v3, v12, v13, v4, v6, v7, v6, v7, \
v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_vec_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, v4, v8, v9, v10, v11, v24, v25, v26, v27
trn_4x4_l4 v0, v1, v2, v3, v8, v9, v10, v11, src, q12, q13, q14, q15, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q0, [src, #0*16]
str q2, [src, #2*16]
dq_butterfly_vec_bot v16, v18, v17, v19, v28, v29, v4, v24, v25, v26, v27
str q1, [src, #1*16]
str q3, [src, #3*16]
add src, src, #64
trn_4x4_l4 v16, v17, v18, v19, v24, v25, v26, v27, src, q28, q29, q30, q31, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v12, v13, v14, v15, v0, v1, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v28, v30, v29, v31, v16, v17, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
dq_butterfly_vec_top_trn_4x4 \
v12, v13, v14, v15, v0, v1, v4, v6, v7, v6, v7, \
v28, v29, v30, v31, v16, v17, v18, v19
dq_butterfly_vec_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, v4, v8, v9, v10, v11, v24, v25, v26, v27
sub counter, counter, #1
cbnz counter, _ntt_bot_loop
dq_butterfly_vec_bot v28, v30, v29, v31, v16, v17, v4, v24, v25, v26, v27
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
trn_4x4_s4 v28, v29, v30, v31, v16, v17, v18, v19, src, q12, q13, q14, q15, #0*16, #1*16, #2*16, #3*16
str q28, [src, #(512+0*16)]
str q29, [src, #(512+1*16)]
str q30, [src, #(512+2*16)]
str q31, [src, #(512+3*16)]
add src, src, #64
.unreq Q
.unreq src
.unreq table0
.unreq table1
.unreq counter
pop_simd
ret
mktmansour/MKT-KSA-Geolocation-Security | 31,124 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-65/aarch64/__asm_poly.S
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
#include "params.h"
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_10_to_32
.global _PQCLEAN_MLDSA65_AARCH64__asm_10_to_32
PQCLEAN_MLDSA65_AARCH64__asm_10_to_32:
_PQCLEAN_MLDSA65_AARCH64__asm_10_to_32:
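/*
 * Expand 256 coefficients packed 10 bits apiece (the ML-DSA t1 encoding)
 * into one 32-bit word each. Every iteration consumes five input words
 * (160 bits = 16 coefficients) and the loop runs 16 times. A reference
 * sketch, with illustrative names only:
 *   for (i = 0; i < 256; ++i)
 *       out[i] = extract_bits(in, 10*i, 10);  // 10-bit field at bit 10*i
 */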
mov x7, #16
_10_to_32_loop:
ldr w2, [x1], #4
ubfx w3, w2, #0, #10
str w3, [x0], #4
ubfx w4, w2, #10, #10
str w4, [x0], #4
ubfx w5, w2, #20, #10
str w5, [x0], #4
lsr w6, w2, #30
ldr w2, [x1], #4
ubfx w3, w2, #0, #8
lsl w3, w3, #2
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #8, #10
str w4, [x0], #4
ubfx w5, w2, #18, #10
str w5, [x0], #4
lsr w6, w2, #28
ldr w2, [x1], #4
ubfx w3, w2, #0, #6
lsl w3, w3, #4
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #6, #10
str w4, [x0], #4
ubfx w5, w2, #16, #10
str w5, [x0], #4
lsr w6, w2, #26
ldr w2, [x1], #4
ubfx w3, w2, #0, #4
lsl w3, w3, #6
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #4, #10
str w4, [x0], #4
ubfx w5, w2, #14, #10
str w5, [x0], #4
lsr w6, w2, #24
ldr w2, [x1], #4
ubfx w3, w2, #0, #2
lsl w3, w3, #8
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #2, #10
str w4, [x0], #4
ubfx w5, w2, #12, #10
str w5, [x0], #4
ubfx w6, w2, #22, #10
str w6, [x0], #4
sub x7, x7, #1
cbnz x7, _10_to_32_loop
ret
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_reduce
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_reduce
PQCLEAN_MLDSA65_AARCH64__asm_poly_reduce:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_reduce:
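/*
 * reduce32 over a full polynomial: t = (a + (1 << 22)) >> 23 via the
 * rounding shift srshr #23, then a -= t*q with q (8380417 for ML-DSA)
 * loaded from [x1], leaving each coefficient congruent to the input in
 * a narrow centred range.
 */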
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #7
_poly_reduce_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_reduce_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_caddq
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_caddq
PQCLEAN_MLDSA65_AARCH64__asm_poly_caddq:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_caddq:
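/*
 * caddq: sshr #31 extracts the sign as an all-ones mask for negative
 * coefficients, and mls a -= mask*q therefore adds q exactly where the
 * coefficient was negative.
 */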
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
sshr v16.4S, v0.4S, #31
ld1 { v5.4S}, [x1], #16
sshr v17.4S, v1.4S, #31
ld1 { v6.4S}, [x1], #16
sshr v18.4S, v2.4S, #31
ld1 { v7.4S}, [x1], #16
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #7
_poly_caddq_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
sshr v16.4S, v0.4S, #31
ld1 { v5.4S}, [x1], #16
sshr v17.4S, v1.4S, #31
ld1 { v6.4S}, [x1], #16
sshr v18.4S, v2.4S, #31
ld1 { v7.4S}, [x1], #16
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_caddq_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_freeze
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_freeze
PQCLEAN_MLDSA65_AARCH64__asm_poly_freeze:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_freeze:
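/*
 * freeze = reduce32 followed by caddq in a single pass: the srshr/mls
 * pair centres each coefficient, then the sshr/mls pair maps negatives
 * into [0, q).
 */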
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
sshr v16.4S, v0.4S, #31
mls v5.4S, v21.4S, v24.4S
sshr v17.4S, v1.4S, #31
mls v6.4S, v22.4S, v24.4S
sshr v18.4S, v2.4S, #31
mls v7.4S, v23.4S, v24.4S
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #8
_poly_freeze_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
sshr v16.4S, v0.4S, #31
mls v5.4S, v21.4S, v24.4S
sshr v17.4S, v1.4S, #31
mls v6.4S, v22.4S, v24.4S
sshr v18.4S, v2.4S, #31
mls v7.4S, v23.4S, v24.4S
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_freeze_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_power2round
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_power2round
PQCLEAN_MLDSA65_AARCH64__asm_poly_power2round:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_power2round:
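/*
 * power2round with D = 13, as a sketch:
 *   a1 = (a + (1 << 12) - 1) >> 13;   // computed here as srshr(a - 1, #13)
 *   a0 = a - (a1 << 13);
 * a1 is written to the buffer at x0, a0 to the buffer at x1; the input
 * polynomial is read from x2.
 */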
mov w4, #1
dup v28.4S, w4
ld1 { v0.4S}, [x2], #16
ld1 { v1.4S}, [x2], #16
ld1 { v2.4S}, [x2], #16
ld1 { v3.4S}, [x2], #16
ld1 {v20.4S}, [x2], #16
sub v4.4S, v0.4S, v28.4S
ld1 {v21.4S}, [x2], #16
sub v5.4S, v1.4S, v28.4S
ld1 {v22.4S}, [x2], #16
sub v6.4S, v2.4S, v28.4S
ld1 {v23.4S}, [x2], #16
sub v7.4S, v3.4S, v28.4S
sub v24.4S, v20.4S, v28.4S
srshr v16.4S, v4.4S, #13
sub v25.4S, v21.4S, v28.4S
srshr v17.4S, v5.4S, #13
sub v26.4S, v22.4S, v28.4S
srshr v18.4S, v6.4S, #13
sub v27.4S, v23.4S, v28.4S
srshr v19.4S, v7.4S, #13
srshr v28.4S, v24.4S, #13
st1 {v16.4S}, [x0], #16
srshr v29.4S, v25.4S, #13
st1 {v17.4S}, [x0], #16
srshr v30.4S, v26.4S, #13
st1 {v18.4S}, [x0], #16
srshr v31.4S, v27.4S, #13
st1 {v19.4S}, [x0], #16
st1 {v28.4S}, [x0], #16
shl v4.4S, v16.4S, #13
st1 {v29.4S}, [x0], #16
shl v5.4S, v17.4S, #13
st1 {v30.4S}, [x0], #16
shl v6.4S, v18.4S, #13
st1 {v31.4S}, [x0], #16
shl v7.4S, v19.4S, #13
shl v24.4S, v28.4S, #13
sub v16.4S, v0.4S, v4.4S
shl v25.4S, v29.4S, #13
sub v17.4S, v1.4S, v5.4S
shl v26.4S, v30.4S, #13
sub v18.4S, v2.4S, v6.4S
shl v27.4S, v31.4S, #13
sub v19.4S, v3.4S, v7.4S
sub v28.4S, v20.4S, v24.4S
st1 {v16.4S}, [x1], #16
sub v29.4S, v21.4S, v25.4S
st1 {v17.4S}, [x1], #16
sub v30.4S, v22.4S, v26.4S
st1 {v18.4S}, [x1], #16
sub v31.4S, v23.4S, v27.4S
st1 {v19.4S}, [x1], #16
mov x16, #7
_poly_power2round_loop:
st1 {v28.4S}, [x1], #16
dup v28.4S, w4
ld1 { v0.4S}, [x2], #16
st1 {v29.4S}, [x1], #16
ld1 { v1.4S}, [x2], #16
st1 {v30.4S}, [x1], #16
ld1 { v2.4S}, [x2], #16
st1 {v31.4S}, [x1], #16
ld1 { v3.4S}, [x2], #16
ld1 {v20.4S}, [x2], #16
sub v4.4S, v0.4S, v28.4S
ld1 {v21.4S}, [x2], #16
sub v5.4S, v1.4S, v28.4S
ld1 {v22.4S}, [x2], #16
sub v6.4S, v2.4S, v28.4S
ld1 {v23.4S}, [x2], #16
sub v7.4S, v3.4S, v28.4S
sub v24.4S, v20.4S, v28.4S
srshr v16.4S, v4.4S, #13
sub v25.4S, v21.4S, v28.4S
srshr v17.4S, v5.4S, #13
sub v26.4S, v22.4S, v28.4S
srshr v18.4S, v6.4S, #13
sub v27.4S, v23.4S, v28.4S
srshr v19.4S, v7.4S, #13
srshr v28.4S, v24.4S, #13
st1 {v16.4S}, [x0], #16
srshr v29.4S, v25.4S, #13
st1 {v17.4S}, [x0], #16
srshr v30.4S, v26.4S, #13
st1 {v18.4S}, [x0], #16
srshr v31.4S, v27.4S, #13
st1 {v19.4S}, [x0], #16
st1 {v28.4S}, [x0], #16
shl v4.4S, v16.4S, #13
st1 {v29.4S}, [x0], #16
shl v5.4S, v17.4S, #13
st1 {v30.4S}, [x0], #16
shl v6.4S, v18.4S, #13
st1 {v31.4S}, [x0], #16
shl v7.4S, v19.4S, #13
shl v24.4S, v28.4S, #13
sub v16.4S, v0.4S, v4.4S
shl v25.4S, v29.4S, #13
sub v17.4S, v1.4S, v5.4S
shl v26.4S, v30.4S, #13
sub v18.4S, v2.4S, v6.4S
shl v27.4S, v31.4S, #13
sub v19.4S, v3.4S, v7.4S
sub v28.4S, v20.4S, v24.4S
st1 {v16.4S}, [x1], #16
sub v29.4S, v21.4S, v25.4S
st1 {v17.4S}, [x1], #16
sub v30.4S, v22.4S, v26.4S
st1 {v18.4S}, [x1], #16
sub v31.4S, v23.4S, v27.4S
st1 {v19.4S}, [x1], #16
sub x16, x16, #1
cbnz x16, _poly_power2round_loop
st1 {v28.4S}, [x1], #16
st1 {v29.4S}, [x1], #16
st1 {v30.4S}, [x1], #16
st1 {v31.4S}, [x1], #16
ret
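/*
 * poly_add: coefficient-wise c = a + b over 256 coefficients
 * (x0 = c, x1 = a, x2 = b); no modular reduction is performed here.
 */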
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_add
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_add
PQCLEAN_MLDSA65_AARCH64__asm_poly_add:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_add:
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
add v16.4S, v0.4S, v4.4S
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
add v17.4S, v1.4S, v5.4S
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
add v18.4S, v2.4S, v6.4S
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
add v19.4S, v3.4S, v7.4S
mov x16, #15
_poly_add_loop:
st1 {v16.4S}, [x0], #16
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
add v16.4S, v0.4S, v4.4S
st1 {v17.4S}, [x0], #16
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
add v17.4S, v1.4S, v5.4S
st1 {v18.4S}, [x0], #16
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
add v18.4S, v2.4S, v6.4S
st1 {v19.4S}, [x0], #16
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
add v19.4S, v3.4S, v7.4S
sub x16, x16, #1
cbnz x16, _poly_add_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
ret
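/*
 * poly_sub: coefficient-wise c = a - b over 256 coefficients
 * (x0 = c, x1 = a, x2 = b); as with poly_add, no reduction is performed.
 */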
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_sub
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_sub
PQCLEAN_MLDSA65_AARCH64__asm_poly_sub:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_sub:
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
sub v16.4S, v0.4S, v4.4S
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
sub v17.4S, v1.4S, v5.4S
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
sub v18.4S, v2.4S, v6.4S
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
sub v19.4S, v3.4S, v7.4S
mov x16, #15
_poly_sub_loop:
st1 {v16.4S}, [x0], #16
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
sub v16.4S, v0.4S, v4.4S
st1 {v17.4S}, [x0], #16
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
sub v17.4S, v1.4S, v5.4S
st1 {v18.4S}, [x0], #16
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
sub v18.4S, v2.4S, v6.4S
st1 {v19.4S}, [x0], #16
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
sub v19.4S, v3.4S, v7.4S
sub x16, x16, #1
cbnz x16, _poly_sub_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
ret
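/*
 * poly_shiftl: in-place multiplication of every coefficient by 2^13
 * (x1 is aliased to x0), i.e. the shift that re-applies the power2round
 * scaling to the high bits.
 */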
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_shiftl
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_shiftl
PQCLEAN_MLDSA65_AARCH64__asm_poly_shiftl:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_shiftl:
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
shl v16.4S, v0.4S, #13
ld1 { v1.4S}, [x1], #16
shl v17.4S, v1.4S, #13
ld1 { v2.4S}, [x1], #16
shl v18.4S, v2.4S, #13
ld1 { v3.4S}, [x1], #16
shl v19.4S, v3.4S, #13
ld1 { v4.4S}, [x1], #16
shl v20.4S, v4.4S, #13
ld1 { v5.4S}, [x1], #16
shl v21.4S, v5.4S, #13
ld1 { v6.4S}, [x1], #16
shl v22.4S, v6.4S, #13
ld1 { v7.4S}, [x1], #16
shl v23.4S, v7.4S, #13
mov x16, #7
_poly_shiftl_loop:
st1 {v16.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
shl v16.4S, v0.4S, #13
st1 {v17.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
shl v17.4S, v1.4S, #13
st1 {v18.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
shl v18.4S, v2.4S, #13
st1 {v19.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
shl v19.4S, v3.4S, #13
st1 {v20.4S}, [x0], #16
ld1 { v4.4S}, [x1], #16
shl v20.4S, v4.4S, #13
st1 {v21.4S}, [x0], #16
ld1 { v5.4S}, [x1], #16
shl v21.4S, v5.4S, #13
st1 {v22.4S}, [x0], #16
ld1 { v6.4S}, [x1], #16
shl v22.4S, v6.4S, #13
st1 {v23.4S}, [x0], #16
ld1 { v7.4S}, [x1], #16
shl v23.4S, v7.4S, #13
sub x16, x16, #1
cbnz x16, _poly_shiftl_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
st1 {v20.4S}, [x0], #16
st1 {v21.4S}, [x0], #16
st1 {v22.4S}, [x0], #16
st1 {v23.4S}, [x0], #16
ret
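/*
 * poly_pointwise_montgomery: c = a * b pointwise, with each 64-bit product
 * mapped back to 32 bits by Montgomery reduction (R = 2^32); w20/w21 are
 * the two reduction constants loaded from the table at x3. For reference,
 * the usual scalar reduction looks like the sketch below (a sketch only;
 * the exact sign convention of the constants at x3 is not shown here):
 *
 *   int32_t montgomery_reduce(int64_t a) {  // returns a * 2^-32 mod q
 *       int32_t t = (int32_t)a * QINV;      // QINV = q^-1 mod 2^32
 *       return (int32_t)((a - (int64_t)t * Q) >> 32);
 *   }
 */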
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_poly_pointwise_montgomery
.global _PQCLEAN_MLDSA65_AARCH64__asm_poly_pointwise_montgomery
PQCLEAN_MLDSA65_AARCH64__asm_poly_pointwise_montgomery:
_PQCLEAN_MLDSA65_AARCH64__asm_poly_pointwise_montgomery:
push_all
ldr w20, [x3, #0]
ldr w21, [x3, #4]
dup v30.4S, w20
dup v31.4S, w21
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
mov x16, #15
_poly_pointwise_montgomery_loop:
st1 {v24.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 {v25.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 {v26.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 {v27.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
sub x16, x16, #1
cbnz x16, _poly_pointwise_montgomery_loop
st1 {v24.4S}, [x0], #16
st1 {v25.4S}, [x0], #16
st1 {v26.4S}, [x0], #16
st1 {v27.4S}, [x0], #16
pop_all
ret
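/*
 * polyvecl_pointwise_acc_montgomery: inner product of two length-L vectors
 * of polynomials, w = sum_i a_i * b_i, accumulating the 64-bit products of
 * all L polynomials before a single Montgomery reduction per output block.
 * x5..x19 address the higher polynomials at 1024-byte strides; for
 * ML-DSA-65, L = 5, so the "#if L > 4" block is compiled in and the
 * "#if L > 5" block is not.
 */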
.align 2
.global PQCLEAN_MLDSA65_AARCH64__asm_polyvecl_pointwise_acc_montgomery
.global _PQCLEAN_MLDSA65_AARCH64__asm_polyvecl_pointwise_acc_montgomery
PQCLEAN_MLDSA65_AARCH64__asm_polyvecl_pointwise_acc_montgomery:
_PQCLEAN_MLDSA65_AARCH64__asm_polyvecl_pointwise_acc_montgomery:
push_all
ldr w20, [x3, #0]
ldr w21, [x3, #4]
add x5, x1, #1024*1
add x6, x2, #1024*1
add x7, x1, #1024*2
add x8, x2, #1024*2
add x9, x1, #1024*3
add x10, x2, #1024*3
#if L > 4
add x11, x1, #1024*4
add x12, x2, #1024*4
#endif
#if L > 5
add x13, x11, #1024*1
add x14, x12, #1024*1
add x15, x11, #1024*2
add x19, x12, #1024*2
#endif
dup v30.4S, w20
dup v31.4S, w21
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x5], #16
ld1 { v4.4S}, [ x6], #16
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x5], #16
ld1 { v5.4S}, [ x6], #16
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x5], #16
ld1 { v6.4S}, [ x6], #16
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x5], #16
ld1 { v7.4S}, [ x6], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x7], #16
ld1 { v4.4S}, [ x8], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x7], #16
ld1 { v5.4S}, [ x8], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x7], #16
ld1 { v6.4S}, [ x8], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x7], #16
ld1 { v7.4S}, [ x8], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x9], #16
ld1 { v4.4S}, [x10], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x9], #16
ld1 { v5.4S}, [x10], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x9], #16
ld1 { v6.4S}, [x10], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x9], #16
ld1 { v7.4S}, [x10], #16
#if L > 4
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x11], #16
ld1 { v4.4S}, [x12], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x11], #16
ld1 { v5.4S}, [x12], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x11], #16
ld1 { v6.4S}, [x12], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x11], #16
ld1 { v7.4S}, [x12], #16
#endif
#if L > 5
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x13], #16
ld1 { v4.4S}, [x14], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x13], #16
ld1 { v5.4S}, [x14], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x13], #16
ld1 { v6.4S}, [x14], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x13], #16
ld1 { v7.4S}, [x14], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x15], #16
ld1 { v4.4S}, [x19], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x15], #16
ld1 { v5.4S}, [x19], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x15], #16
ld1 { v6.4S}, [x19], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x15], #16
ld1 { v7.4S}, [x19], #16
#endif
mov x16, #15
_polyvecl_pointwise_acc_montgomery_loop:
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
ld1 { v0.4S}, [x1], #16
uzp1 v21.4S, v13.4S, v17.4S
ld1 { v1.4S}, [x1], #16
uzp1 v22.4S, v14.4S, v18.4S
ld1 { v2.4S}, [x1], #16
uzp1 v23.4S, v15.4S, v19.4S
ld1 { v3.4S}, [x1], #16
mul v24.4S, v20.4S, v31.4S
ld1 { v4.4S}, [x2], #16
mul v25.4S, v21.4S, v31.4S
ld1 { v5.4S}, [x2], #16
mul v26.4S, v22.4S, v31.4S
ld1 { v6.4S}, [x2], #16
mul v27.4S, v23.4S, v31.4S
ld1 { v7.4S}, [x2], #16
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x5], #16
st1 {v24.4S}, [x0], #16
ld1 { v4.4S}, [ x6], #16
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x5], #16
st1 {v25.4S}, [x0], #16
ld1 { v5.4S}, [ x6], #16
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x5], #16
st1 {v26.4S}, [x0], #16
ld1 { v6.4S}, [ x6], #16
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x5], #16
st1 {v27.4S}, [x0], #16
ld1 { v7.4S}, [ x6], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x7], #16
ld1 { v4.4S}, [ x8], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x7], #16
ld1 { v5.4S}, [ x8], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x7], #16
ld1 { v6.4S}, [ x8], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x7], #16
ld1 { v7.4S}, [ x8], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x9], #16
ld1 { v4.4S}, [x10], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x9], #16
ld1 { v5.4S}, [x10], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x9], #16
ld1 { v6.4S}, [x10], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x9], #16
ld1 { v7.4S}, [x10], #16
#if L > 4
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x11], #16
ld1 { v4.4S}, [x12], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x11], #16
ld1 { v5.4S}, [x12], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x11], #16
ld1 { v6.4S}, [x12], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x11], #16
ld1 { v7.4S}, [x12], #16
#endif
#if L > 5
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x13], #16
ld1 { v4.4S}, [x14], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x13], #16
ld1 { v5.4S}, [x14], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x13], #16
ld1 { v6.4S}, [x14], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x13], #16
ld1 { v7.4S}, [x14], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x15], #16
ld1 { v4.4S}, [x19], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x15], #16
ld1 { v5.4S}, [x19], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x15], #16
ld1 { v6.4S}, [x19], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x15], #16
ld1 { v7.4S}, [x19], #16
#endif
sub x16, x16, #1
cbnz x16, _polyvecl_pointwise_acc_montgomery_loop
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
st1 {v24.4S}, [x0], #16
st1 {v25.4S}, [x0], #16
st1 {v26.4S}, [x0], #16
st1 {v27.4S}, [x0], #16
pop_all
ret
/* File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/avx2/pointwise.S */
#include "params.h"
#include "cdecl.h"
.text
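# pointwise_avx: c = a * b (pointwise, 32-bit coefficients) with Montgomery
# reduction done on even/odd lanes via vpmuldq; %rdi = c, %rsi = a,
# %rdx = b, %rcx = table holding the broadcast constants _8XQ and _8XQINV.
# The main loop covers 240 coefficients (10 iterations of 24); the tail
# after the loop handles the remaining 16.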
.global cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_avx)
.global _cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_avx)
cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_avx):
_cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_avx):
#consts
vmovdqa _8XQINV*4(%rcx),%ymm0
vmovdqa _8XQ*4(%rcx),%ymm1
xor %eax,%eax
_looptop1:
#load
vmovdqa (%rsi),%ymm2
vmovdqa 32(%rsi),%ymm4
vmovdqa 64(%rsi),%ymm6
vmovdqa (%rdx),%ymm10
vmovdqa 32(%rdx),%ymm12
vmovdqa 64(%rdx),%ymm14
vpsrlq $32,%ymm2,%ymm3
vpsrlq $32,%ymm4,%ymm5
vmovshdup %ymm6,%ymm7
vpsrlq $32,%ymm10,%ymm11
vpsrlq $32,%ymm12,%ymm13
vmovshdup %ymm14,%ymm15
#mul
vpmuldq %ymm2,%ymm10,%ymm2
vpmuldq %ymm3,%ymm11,%ymm3
vpmuldq %ymm4,%ymm12,%ymm4
vpmuldq %ymm5,%ymm13,%ymm5
vpmuldq %ymm6,%ymm14,%ymm6
vpmuldq %ymm7,%ymm15,%ymm7
#reduce
vpmuldq %ymm0,%ymm2,%ymm10
vpmuldq %ymm0,%ymm3,%ymm11
vpmuldq %ymm0,%ymm4,%ymm12
vpmuldq %ymm0,%ymm5,%ymm13
vpmuldq %ymm0,%ymm6,%ymm14
vpmuldq %ymm0,%ymm7,%ymm15
vpmuldq %ymm1,%ymm10,%ymm10
vpmuldq %ymm1,%ymm11,%ymm11
vpmuldq %ymm1,%ymm12,%ymm12
vpmuldq %ymm1,%ymm13,%ymm13
vpmuldq %ymm1,%ymm14,%ymm14
vpmuldq %ymm1,%ymm15,%ymm15
vpsubq %ymm10,%ymm2,%ymm2
vpsubq %ymm11,%ymm3,%ymm3
vpsubq %ymm12,%ymm4,%ymm4
vpsubq %ymm13,%ymm5,%ymm5
vpsubq %ymm14,%ymm6,%ymm6
vpsubq %ymm15,%ymm7,%ymm7
vpsrlq $32,%ymm2,%ymm2
vpsrlq $32,%ymm4,%ymm4
vmovshdup %ymm6,%ymm6
#store
vpblendd $0xAA,%ymm3,%ymm2,%ymm2
vpblendd $0xAA,%ymm5,%ymm4,%ymm4
vpblendd $0xAA,%ymm7,%ymm6,%ymm6
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
vmovdqa %ymm6,64(%rdi)
add $96,%rdi
add $96,%rsi
add $96,%rdx
add $1,%eax
cmp $10,%eax
jb _looptop1
vmovdqa (%rsi),%ymm2
vmovdqa 32(%rsi),%ymm4
vmovdqa (%rdx),%ymm10
vmovdqa 32(%rdx),%ymm12
vpsrlq $32,%ymm2,%ymm3
vpsrlq $32,%ymm4,%ymm5
vmovshdup %ymm10,%ymm11
vmovshdup %ymm12,%ymm13
#mul
vpmuldq %ymm2,%ymm10,%ymm2
vpmuldq %ymm3,%ymm11,%ymm3
vpmuldq %ymm4,%ymm12,%ymm4
vpmuldq %ymm5,%ymm13,%ymm5
#reduce
vpmuldq %ymm0,%ymm2,%ymm10
vpmuldq %ymm0,%ymm3,%ymm11
vpmuldq %ymm0,%ymm4,%ymm12
vpmuldq %ymm0,%ymm5,%ymm13
vpmuldq %ymm1,%ymm10,%ymm10
vpmuldq %ymm1,%ymm11,%ymm11
vpmuldq %ymm1,%ymm12,%ymm12
vpmuldq %ymm1,%ymm13,%ymm13
vpsubq %ymm10,%ymm2,%ymm2
vpsubq %ymm11,%ymm3,%ymm3
vpsubq %ymm12,%ymm4,%ymm4
vpsubq %ymm13,%ymm5,%ymm5
vpsrlq $32,%ymm2,%ymm2
vmovshdup %ymm4,%ymm4
#store
vpblendd $0x55,%ymm2,%ymm3,%ymm2
vpblendd $0x55,%ymm4,%ymm5,%ymm4
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
ret
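# The pointwise/acc macro pair below implements the accumulating variant:
# "pointwise off" forms the 64-bit products of one 16-coefficient block at
# byte offset off, and "acc" adds them into the ymm2-ymm5 accumulators.
# pointwise_acc_avx chains offsets 0..6144 (seven polynomials of 1024
# bytes, i.e. L = 7 for ML-DSA-87) before one Montgomery reduction per
# output block.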
.macro pointwise off
#load
vmovdqa \off(%rsi),%ymm6
vmovdqa \off+32(%rsi),%ymm8
vmovdqa \off(%rdx),%ymm10
vmovdqa \off+32(%rdx),%ymm12
vpsrlq $32,%ymm6,%ymm7
vpsrlq $32,%ymm8,%ymm9
vmovshdup %ymm10,%ymm11
vmovshdup %ymm12,%ymm13
#mul
vpmuldq %ymm6,%ymm10,%ymm6
vpmuldq %ymm7,%ymm11,%ymm7
vpmuldq %ymm8,%ymm12,%ymm8
vpmuldq %ymm9,%ymm13,%ymm9
.endm
.macro acc
vpaddq %ymm6,%ymm2,%ymm2
vpaddq %ymm7,%ymm3,%ymm3
vpaddq %ymm8,%ymm4,%ymm4
vpaddq %ymm9,%ymm5,%ymm5
.endm
.global cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_acc_avx)
.global _cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_acc_avx)
cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_acc_avx):
_cdecl(PQCLEAN_MLDSA87_AVX2_pointwise_acc_avx):
#consts
vmovdqa _8XQINV*4(%rcx),%ymm0
vmovdqa _8XQ*4(%rcx),%ymm1
xor %eax,%eax
_looptop2:
pointwise 0
#mov
vmovdqa %ymm6,%ymm2
vmovdqa %ymm7,%ymm3
vmovdqa %ymm8,%ymm4
vmovdqa %ymm9,%ymm5
pointwise 1024
acc
pointwise 2048
acc
pointwise 3072
acc
pointwise 4096
acc
pointwise 5120
acc
pointwise 6144
acc
#reduce
vpmuldq %ymm0,%ymm2,%ymm6
vpmuldq %ymm0,%ymm3,%ymm7
vpmuldq %ymm0,%ymm4,%ymm8
vpmuldq %ymm0,%ymm5,%ymm9
vpmuldq %ymm1,%ymm6,%ymm6
vpmuldq %ymm1,%ymm7,%ymm7
vpmuldq %ymm1,%ymm8,%ymm8
vpmuldq %ymm1,%ymm9,%ymm9
vpsubq %ymm6,%ymm2,%ymm2
vpsubq %ymm7,%ymm3,%ymm3
vpsubq %ymm8,%ymm4,%ymm4
vpsubq %ymm9,%ymm5,%ymm5
vpsrlq $32,%ymm2,%ymm2
vmovshdup %ymm4,%ymm4
#store
vpblendd $0xAA,%ymm3,%ymm2,%ymm2
vpblendd $0xAA,%ymm5,%ymm4,%ymm4
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
add $64,%rsi
add $64,%rdx
add $64,%rdi
add $1,%eax
cmp $16,%eax
jb _looptop2
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
/* File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/avx2/shuffle.S */
#include "cdecl.h"
.include "shuffle.inc"
.text
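# nttunpack128_avx: re-order one 256-byte block of 64 coefficients from the
# lane-interleaved layout produced by the vectorized NTT back to sequential
# order, using the shuffle8/shuffle4/shuffle2 helpers from shuffle.inc.
# nttunpack_avx below applies it to all four blocks of the polynomial.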
nttunpack128_avx:
#load
vmovdqa (%rdi),%ymm4
vmovdqa 32(%rdi),%ymm5
vmovdqa 64(%rdi),%ymm6
vmovdqa 96(%rdi),%ymm7
vmovdqa 128(%rdi),%ymm8
vmovdqa 160(%rdi),%ymm9
vmovdqa 192(%rdi),%ymm10
vmovdqa 224(%rdi),%ymm11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
#store
vmovdqa %ymm9,(%rdi)
vmovdqa %ymm8,32(%rdi)
vmovdqa %ymm7,64(%rdi)
vmovdqa %ymm6,96(%rdi)
vmovdqa %ymm5,128(%rdi)
vmovdqa %ymm4,160(%rdi)
vmovdqa %ymm3,192(%rdi)
vmovdqa %ymm11,224(%rdi)
ret
.global cdecl(PQCLEAN_MLDSA87_AVX2_nttunpack_avx)
.global _cdecl(PQCLEAN_MLDSA87_AVX2_nttunpack_avx)
cdecl(PQCLEAN_MLDSA87_AVX2_nttunpack_avx):
_cdecl(PQCLEAN_MLDSA87_AVX2_nttunpack_avx):
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
/* File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/avx2/f1600x4.S */
/* Taken from Bas Westerbaan's new 4-way SHAKE implementation
* for Sphincs+ (https://github.com/sphincs/sphincsplus/pull/14/),
* but uses vpshufb for byte-granular rotations as in the Keccak Code Package. */
#include "cdecl.h"
.data
.p2align 5
rho8:
.byte 7,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14,7,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14
rho56:
.byte 1,2,3,4,5,6,7,0,9,10,11,12,13,14,15,8,1,2,3,4,5,6,7,0,9,10,11,12,13,14,15,8
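# vpshufb with these byte-permutation masks rotates each 64-bit lane left
# by 8 bits (rho8) and by 56 bits (rho56), replacing the usual shift+or
# pairs for the byte-aligned rho offsets.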
.text
.global cdecl(PQCLEAN_MLDSA87_AVX2_f1600x4)
.global _cdecl(PQCLEAN_MLDSA87_AVX2_f1600x4)
cdecl(PQCLEAN_MLDSA87_AVX2_f1600x4):
_cdecl(PQCLEAN_MLDSA87_AVX2_f1600x4):
vmovdqa rho8(%rip), %ymm0
movq $6, %rax
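# 24 rounds of Keccak-f[1600] on 4 parallel states (25 lanes, 32 bytes per
# lane, at %rdi): each pass through looptop performs four rounds, consuming
# four round constants from %rsi, and %rax counts the six passes.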
looptop:
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 192(%rdi), %ymm4, %ymm9
vpxor 384(%rdi), %ymm3, %ymm10
vpxor 576(%rdi), %ymm2, %ymm11
vpxor 768(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 0(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 96(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 320(%rdi), %ymm5, %ymm10
vpxor 512(%rdi), %ymm4, %ymm11
vpxor 704(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 32(%rdi), %ymm4, %ymm8
vpxor 224(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 608(%rdi), %ymm1, %ymm11
vpxor 640(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 128(%rdi), %ymm1, %ymm8
vpxor 160(%rdi), %ymm5, %ymm9
vpxor 352(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 736(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 64(%rdi), %ymm3, %ymm8
vpxor 256(%rdi), %ymm2, %ymm9
vpxor 448(%rdi), %ymm1, %ymm10
vpxor 480(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 448(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 512(%rdi), %ymm4, %ymm9
vpxor 224(%rdi), %ymm3, %ymm10
vpxor 736(%rdi), %ymm2, %ymm11
vpxor 448(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 8(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 576(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 640(%rdi), %ymm5, %ymm10
vpxor 352(%rdi), %ymm4, %ymm11
vpxor 64(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 192(%rdi), %ymm4, %ymm8
vpxor 704(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 128(%rdi), %ymm1, %ymm11
vpxor 480(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 768(%rdi), %ymm1, %ymm8
vpxor 320(%rdi), %ymm5, %ymm9
vpxor 32(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 256(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 384(%rdi), %ymm3, %ymm8
vpxor 96(%rdi), %ymm2, %ymm9
vpxor 608(%rdi), %ymm1, %ymm10
vpxor 160(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 608(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 352(%rdi), %ymm4, %ymm9
vpxor 704(%rdi), %ymm3, %ymm10
vpxor 256(%rdi), %ymm2, %ymm11
vpxor 608(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 16(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 736(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 480(%rdi), %ymm5, %ymm10
vpxor 32(%rdi), %ymm4, %ymm11
vpxor 384(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 512(%rdi), %ymm4, %ymm8
vpxor 64(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 768(%rdi), %ymm1, %ymm11
vpxor 160(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 448(%rdi), %ymm1, %ymm8
vpxor 640(%rdi), %ymm5, %ymm9
vpxor 192(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 96(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 224(%rdi), %ymm3, %ymm8
vpxor 576(%rdi), %ymm2, %ymm9
vpxor 128(%rdi), %ymm1, %ymm10
vpxor 320(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 128(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 32(%rdi), %ymm4, %ymm9
vpxor 64(%rdi), %ymm3, %ymm10
vpxor 96(%rdi), %ymm2, %ymm11
vpxor 128(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 24(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 256(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 160(%rdi), %ymm5, %ymm10
vpxor 192(%rdi), %ymm4, %ymm11
vpxor 224(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 352(%rdi), %ymm4, %ymm8
vpxor 384(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 448(%rdi), %ymm1, %ymm11
vpxor 320(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 608(%rdi), %ymm1, %ymm8
vpxor 480(%rdi), %ymm5, %ymm9
vpxor 512(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 576(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 704(%rdi), %ymm3, %ymm8
vpxor 736(%rdi), %ymm2, %ymm9
vpxor 768(%rdi), %ymm1, %ymm10
vpxor 640(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 768(%rdi)
addq $32, %rsi
subq $1, %rax
jnz looptop
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
/* File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/avx2/ntt.S */
#include "cdecl.h"
.include "shuffle.inc"
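# butterfly: Cooley-Tukey butterfly on packed 32-bit coefficients,
#   t = montgomery_reduce((int64_t)zeta * h);  (l, h) = (l + t, l - t)
# where zl0/zl1 hold zeta*qinv (from _ZETAS_QINV) and zh0/zh1 hold zeta
# (from _ZETAS), split across even/odd lanes for vpmuldq.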
.macro butterfly l,h,zl0=1,zl1=1,zh0=2,zh1=2
vpmuldq %ymm\zl0,%ymm\h,%ymm13
vmovshdup %ymm\h,%ymm12
vpmuldq %ymm\zl1,%ymm12,%ymm14
vpmuldq %ymm\zh0,%ymm\h,%ymm\h
vpmuldq %ymm\zh1,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vmovshdup %ymm\h,%ymm\h
vpblendd $0xAA,%ymm12,%ymm\h,%ymm\h
vpsubd %ymm\h,%ymm\l,%ymm12
vpaddd %ymm\h,%ymm\l,%ymm\l
vmovshdup %ymm13,%ymm13
vpblendd $0xAA,%ymm14,%ymm13,%ymm13
vpaddd %ymm13,%ymm12,%ymm\h
vpsubd %ymm13,%ymm\l,%ymm\l
.endm
.macro levels0t1 off
/* level 0 */
vpbroadcastd (_ZETAS_QINV+1)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+1)*4(%rsi),%ymm2
vmovdqa 0+32*\off(%rdi),%ymm4
vmovdqa 128+32*\off(%rdi),%ymm5
vmovdqa 256+32*\off(%rdi),%ymm6
vmovdqa 384+32*\off(%rdi),%ymm7
vmovdqa 512+32*\off(%rdi),%ymm8
vmovdqa 640+32*\off(%rdi),%ymm9
vmovdqa 768+32*\off(%rdi),%ymm10
vmovdqa 896+32*\off(%rdi),%ymm11
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
/* level 1 */
vpbroadcastd (_ZETAS_QINV+2)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+2)*4(%rsi),%ymm2
butterfly 4,6
butterfly 5,7
vpbroadcastd (_ZETAS_QINV+3)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+3)*4(%rsi),%ymm2
butterfly 8,10
butterfly 9,11
vmovdqa %ymm4, 0+32*\off(%rdi)
vmovdqa %ymm5,128+32*\off(%rdi)
vmovdqa %ymm6,256+32*\off(%rdi)
vmovdqa %ymm7,384+32*\off(%rdi)
vmovdqa %ymm8,512+32*\off(%rdi)
vmovdqa %ymm9,640+32*\off(%rdi)
vmovdqa %ymm10,768+32*\off(%rdi)
vmovdqa %ymm11,896+32*\off(%rdi)
.endm
.macro levels2t7 off
/* level 2 */
vmovdqa 256*\off+ 0(%rdi),%ymm4
vmovdqa 256*\off+ 32(%rdi),%ymm5
vmovdqa 256*\off+ 64(%rdi),%ymm6
vmovdqa 256*\off+ 96(%rdi),%ymm7
vmovdqa 256*\off+128(%rdi),%ymm8
vmovdqa 256*\off+160(%rdi),%ymm9
vmovdqa 256*\off+192(%rdi),%ymm10
vmovdqa 256*\off+224(%rdi),%ymm11
vpbroadcastd (_ZETAS_QINV+4+\off)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+4+\off)*4(%rsi),%ymm2
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
/* level 3 */
vmovdqa (_ZETAS_QINV+8+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+8+8*\off)*4(%rsi),%ymm2
butterfly 3,5
butterfly 8,10
butterfly 4,6
butterfly 9,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
/* level 4 */
vmovdqa (_ZETAS_QINV+40+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+40+8*\off)*4(%rsi),%ymm2
butterfly 7,8
butterfly 5,6
butterfly 3,4
butterfly 10,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
/* level 5 */
vmovdqa (_ZETAS_QINV+72+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+72+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,5,1,10,2,15
butterfly 8,4,1,10,2,15
butterfly 7,3,1,10,2,15
butterfly 6,11,1,10,2,15
/* level 6 */
vmovdqa (_ZETAS_QINV+104+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+104+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,7,1,10,2,15
butterfly 8,6,1,10,2,15
vmovdqa (_ZETAS_QINV+104+8*\off+32)*4(%rsi),%ymm1
vmovdqa (_ZETAS+104+8*\off+32)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 5,3,1,10,2,15
butterfly 4,11,1,10,2,15
/* level 7 */
vmovdqa (_ZETAS_QINV+168+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,8,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+32)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+32)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 7,6,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+64)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+64)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 5,4,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+96)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+96)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 3,11,1,10,2,15
vmovdqa %ymm9,256*\off+ 0(%rdi)
vmovdqa %ymm8,256*\off+ 32(%rdi)
vmovdqa %ymm7,256*\off+ 64(%rdi)
vmovdqa %ymm6,256*\off+ 96(%rdi)
vmovdqa %ymm5,256*\off+128(%rdi)
vmovdqa %ymm4,256*\off+160(%rdi)
vmovdqa %ymm3,256*\off+192(%rdi)
vmovdqa %ymm11,256*\off+224(%rdi)
.endm
.text
.global cdecl(PQCLEAN_MLDSA87_AVX2_ntt_avx)
.global _cdecl(PQCLEAN_MLDSA87_AVX2_ntt_avx)
cdecl(PQCLEAN_MLDSA87_AVX2_ntt_avx):
_cdecl(PQCLEAN_MLDSA87_AVX2_ntt_avx):
vmovdqa _8XQ*4(%rsi),%ymm0
levels0t1 0
levels0t1 1
levels0t1 2
levels0t1 3
levels2t7 0
levels2t7 1
levels2t7 2
levels2t7 3
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
/* File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/avx2/invntt.S */
#include "cdecl.h"
.include "shuffle.inc"
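# butterfly: Gentleman-Sande butterfly for the inverse NTT,
#   t = h - l;  l = l + h;  h = montgomery_reduce((int64_t)zeta * t)
# with zeta*qinv in zl0/zl1 and zeta in zh0/zh1, as in ntt.S.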
.macro butterfly l,h,zl0=1,zl1=1,zh0=2,zh1=2
vpsubd %ymm\l,%ymm\h,%ymm12
vpaddd %ymm\h,%ymm\l,%ymm\l
vpmuldq %ymm\zl0,%ymm12,%ymm13
vmovshdup %ymm12,%ymm\h
vpmuldq %ymm\zl1,%ymm\h,%ymm14
vpmuldq %ymm\zh0,%ymm12,%ymm12
vpmuldq %ymm\zh1,%ymm\h,%ymm\h
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpsubd %ymm13,%ymm12,%ymm12
vpsubd %ymm14,%ymm\h,%ymm\h
vmovshdup %ymm12,%ymm12
vpblendd $0xAA,%ymm\h,%ymm12,%ymm\h
.endm
.macro levels0t5 off
vmovdqa 256*\off+ 0(%rdi),%ymm4
vmovdqa 256*\off+ 32(%rdi),%ymm5
vmovdqa 256*\off+ 64(%rdi),%ymm6
vmovdqa 256*\off+ 96(%rdi),%ymm7
vmovdqa 256*\off+128(%rdi),%ymm8
vmovdqa 256*\off+160(%rdi),%ymm9
vmovdqa 256*\off+192(%rdi),%ymm10
vmovdqa 256*\off+224(%rdi),%ymm11
/* level 0 */
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,5,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-40)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-40)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 6,7,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-72)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-72)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 8,9,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-104)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-104)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 10,11,1,3,2,15
/* level 1 */
vpermq $0x1B,(_ZETAS_QINV+168-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+168-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,6,1,3,2,15
butterfly 5,7,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+168-8*\off-40)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+168-8*\off-40)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 8,10,1,3,2,15
butterfly 9,11,1,3,2,15
/* level 2 */
vpermq $0x1B,(_ZETAS_QINV+104-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+104-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,8,1,3,2,15
butterfly 5,9,1,3,2,15
butterfly 6,10,1,3,2,15
butterfly 7,11,1,3,2,15
/* level 3 */
shuffle2 4,5,3,5
shuffle2 6,7,4,7
shuffle2 8,9,6,9
shuffle2 10,11,8,11
vpermq $0x1B,(_ZETAS_QINV+72-8*\off-8)*4(%rsi),%ymm1
vpermq $0x1B,(_ZETAS+72-8*\off-8)*4(%rsi),%ymm2
butterfly 3,5
butterfly 4,7
butterfly 6,9
butterfly 8,11
/* level 4 */
shuffle4 3,4,10,4
shuffle4 6,8,3,8
shuffle4 5,7,6,7
shuffle4 9,11,5,11
vpermq $0x1B,(_ZETAS_QINV+40-8*\off-8)*4(%rsi),%ymm1
vpermq $0x1B,(_ZETAS+40-8*\off-8)*4(%rsi),%ymm2
butterfly 10,4
butterfly 3,8
butterfly 6,7
butterfly 5,11
/* level 5 */
shuffle8 10,3,9,3
shuffle8 6,5,10,5
shuffle8 4,8,6,8
shuffle8 7,11,4,11
vpbroadcastd (_ZETAS_QINV+7-\off)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+7-\off)*4(%rsi),%ymm2
butterfly 9,3
butterfly 10,5
butterfly 6,8
butterfly 4,11
vmovdqa %ymm9,256*\off+ 0(%rdi)
vmovdqa %ymm10,256*\off+ 32(%rdi)
vmovdqa %ymm6,256*\off+ 64(%rdi)
vmovdqa %ymm4,256*\off+ 96(%rdi)
vmovdqa %ymm3,256*\off+128(%rdi)
vmovdqa %ymm5,256*\off+160(%rdi)
vmovdqa %ymm8,256*\off+192(%rdi)
vmovdqa %ymm11,256*\off+224(%rdi)
.endm
.macro levels6t7 off
vmovdqa 0+32*\off(%rdi),%ymm4
vmovdqa 128+32*\off(%rdi),%ymm5
vmovdqa 256+32*\off(%rdi),%ymm6
vmovdqa 384+32*\off(%rdi),%ymm7
vmovdqa 512+32*\off(%rdi),%ymm8
vmovdqa 640+32*\off(%rdi),%ymm9
vmovdqa 768+32*\off(%rdi),%ymm10
vmovdqa 896+32*\off(%rdi),%ymm11
/* level 6 */
vpbroadcastd (_ZETAS_QINV+3)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+3)*4(%rsi),%ymm2
butterfly 4,6
butterfly 5,7
vpbroadcastd (_ZETAS_QINV+2)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+2)*4(%rsi),%ymm2
butterfly 8,10
butterfly 9,11
/* level 7 */
vpbroadcastd (_ZETAS_QINV+0)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+0)*4(%rsi),%ymm2
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
vmovdqa %ymm8,512+32*\off(%rdi)
vmovdqa %ymm9,640+32*\off(%rdi)
vmovdqa %ymm10,768+32*\off(%rdi)
vmovdqa %ymm11,896+32*\off(%rdi)
vmovdqa (_8XDIV_QINV)*4(%rsi),%ymm1
vmovdqa (_8XDIV)*4(%rsi),%ymm2
vpmuldq %ymm1,%ymm4,%ymm12
vpmuldq %ymm1,%ymm5,%ymm13
vmovshdup %ymm4,%ymm8
vmovshdup %ymm5,%ymm9
vpmuldq %ymm1,%ymm8,%ymm14
vpmuldq %ymm1,%ymm9,%ymm15
vpmuldq %ymm2,%ymm4,%ymm4
vpmuldq %ymm2,%ymm5,%ymm5
vpmuldq %ymm2,%ymm8,%ymm8
vpmuldq %ymm2,%ymm9,%ymm9
vpmuldq %ymm0,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpmuldq %ymm0,%ymm15,%ymm15
vpsubd %ymm12,%ymm4,%ymm4
vpsubd %ymm13,%ymm5,%ymm5
vpsubd %ymm14,%ymm8,%ymm8
vpsubd %ymm15,%ymm9,%ymm9
vmovshdup %ymm4,%ymm4
vmovshdup %ymm5,%ymm5
vpblendd $0xAA,%ymm8,%ymm4,%ymm4
vpblendd $0xAA,%ymm9,%ymm5,%ymm5
vpmuldq %ymm1,%ymm6,%ymm12
vpmuldq %ymm1,%ymm7,%ymm13
vmovshdup %ymm6,%ymm8
vmovshdup %ymm7,%ymm9
vpmuldq %ymm1,%ymm8,%ymm14
vpmuldq %ymm1,%ymm9,%ymm15
vpmuldq %ymm2,%ymm6,%ymm6
vpmuldq %ymm2,%ymm7,%ymm7
vpmuldq %ymm2,%ymm8,%ymm8
vpmuldq %ymm2,%ymm9,%ymm9
vpmuldq %ymm0,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpmuldq %ymm0,%ymm15,%ymm15
vpsubd %ymm12,%ymm6,%ymm6
vpsubd %ymm13,%ymm7,%ymm7
vpsubd %ymm14,%ymm8,%ymm8
vpsubd %ymm15,%ymm9,%ymm9
vmovshdup %ymm6,%ymm6
vmovshdup %ymm7,%ymm7
vpblendd $0xAA,%ymm8,%ymm6,%ymm6
vpblendd $0xAA,%ymm9,%ymm7,%ymm7
vmovdqa %ymm4, 0+32*\off(%rdi)
vmovdqa %ymm5,128+32*\off(%rdi)
vmovdqa %ymm6,256+32*\off(%rdi)
vmovdqa %ymm7,384+32*\off(%rdi)
.endm
.text
.global cdecl(PQCLEAN_MLDSA87_AVX2_invntt_avx)
.global _cdecl(PQCLEAN_MLDSA87_AVX2_invntt_avx)
cdecl(PQCLEAN_MLDSA87_AVX2_invntt_avx):
_cdecl(PQCLEAN_MLDSA87_AVX2_invntt_avx):
vmovdqa _8XQ*4(%rsi),%ymm0
levels0t5 0
levels0t5 1
levels0t5 2
levels0t5 3
levels6t7 0
levels6t7 1
levels6t7 2
levels6t7 3
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
/* File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/aarch64/__asm_iNTT.S */
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_top
.global _PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_top
PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_top:
_PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_top:
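// Inverse NTT, top part. As used below: x0 = coefficient array,
// x1 = table of twiddle factors (loaded into q20-q27), x2 = constants
// with q at offset 0 and the invN*R^2 / invN*W*R^2 Montgomery pairs
// at offsets 16-28.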
push_all
Q .req w20
Qhalf .req w21
nQhalf .req w22
invNR2ph .req w24
invNR2dp .req w25
invNWR2ph .req w26
invNWR2dp .req w27
src .req x0
counter .req x19
ldr Q, [x2, #0]
lsr Qhalf, Q, #1
neg nQhalf, Qhalf
ldr invNR2ph, [x2, #16]
ldr invNR2dp, [x2, #20]
ldr invNWR2ph, [x2, #24]
ldr invNWR2dp, [x2, #28]
ldr q20, [x1, #0*16]
ldr q21, [x1, #1*16]
ldr q22, [x1, #2*16]
ldr q23, [x1, #3*16]
ldr q24, [x1, #4*16]
ldr q25, [x1, #5*16]
ldr q26, [x1, #6*16]
ldr q27, [x1, #7*16]
mov v20.S[0], Q
ldr q0, [src, # 0*64]
ldr q1, [src, # 1*64]
ldr q2, [src, # 2*64]
ldr q3, [src, # 3*64]
ldr q4, [src, # 4*64]
ldr q5, [src, # 5*64]
ldr q6, [src, # 6*64]
ldr q7, [src, # 7*64]
qq_butterfly_botll \
v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, \
src, \
q8, q9, q10, q11, \
#8*64, #9*64, #10*64, #11*64, \
src, \
q12, q13, q14, q15, \
#12*64, #13*64, #14*64, #15*64
qq_butterfly_mix_rev v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v20, v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mix_rev v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v20, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3
qq_butterfly_mix_rev v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v20, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3
qq_butterfly_mix_rev v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v20, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1
qq_butterfly_mix_rev v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_butterfly_top v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_sub_add v16, v17, v18, v19, v28, v29, v30, v31, v0, v2, v4, v6, v8, v10, v12, v14
qq_sub_add v0, v2, v4, v6, v8, v10, v12, v14, v1, v3, v5, v7, v9, v11, v13, v15
mov v20.S[2], invNR2ph
mov v20.S[3], invNR2dp
qq_montgomery_mul v1, v3, v5, v7, v0, v2, v4, v6, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v0, v2, v4, v6, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov v20.S[2], invNWR2ph
mov v20.S[3], invNWR2dp
qq_montgomery_mul v9, v11, v13, v15, v8, v10, v12, v14, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v8, v10, v12, v14, v28, v29, v30, v31, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov counter, #3
_intt_top_loop:
dup v29.4S, Q
dup v30.4S, Qhalf
dup v31.4S, nQhalf
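// Conditional reduction: cmge builds all-ones masks for a >= q/2 and
// for a <= -q/2; their difference is -1, 0, or +1, and mla adds that
// multiple of q, pulling each coefficient back toward (-q/2, q/2).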
cmge v18.4S, v31.4S, v0.4S
cmge v19.4S, v31.4S, v1.4S
cmge v16.4S, v0.4S, v30.4S
cmge v17.4S, v1.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v0.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v2.4S
mla v1.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v3.4S
str q0, [src, #0*64]
cmge v16.4S, v2.4S, v30.4S
ldr q0, [src, #(16 + 0*64)]
str q1, [src, #1*64]
cmge v17.4S, v3.4S, v30.4S
ldr q1, [src, #(16 + 1*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v2.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v4.4S
mla v3.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v5.4S
str q2, [src, #2*64]
cmge v16.4S, v4.4S, v30.4S
ldr q2, [src, #(16 + 2*64)]
str q3, [src, #3*64]
cmge v17.4S, v5.4S, v30.4S
ldr q3, [src, #(16 + 3*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v4.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v6.4S
mla v5.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v7.4S
str q4, [src, #4*64]
cmge v16.4S, v6.4S, v30.4S
ldr q4, [src, #(16 + 4*64)]
str q5, [src, #5*64]
cmge v17.4S, v7.4S, v30.4S
ldr q5, [src, #(16 + 5*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v6.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v8.4S
mla v7.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v9.4S
str q6, [src, #6*64]
cmge v16.4S, v8.4S, v30.4S
ldr q6, [src, #(16 + 6*64)]
str q7, [src, #7*64]
cmge v17.4S, v9.4S, v30.4S
ldr q7, [src, #(16 + 7*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v8.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v10.4S
mla v9.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v11.4S
str q8, [src, #8*64]
cmge v16.4S, v10.4S, v30.4S
str q9, [src, #9*64]
cmge v17.4S, v11.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v10.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v12.4S
mla v11.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v13.4S
str q10, [src, #10*64]
cmge v16.4S, v12.4S, v30.4S
str q11, [src, #11*64]
cmge v17.4S, v13.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v12.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v14.4S
mla v13.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v15.4S
str q12, [src, #12*64]
cmge v16.4S, v14.4S, v30.4S
str q13, [src, #13*64]
cmge v17.4S, v15.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v14.4S, v16.4S, v29.4S
mla v15.4S, v17.4S, v29.4S
str q14, [src, #14*64]
str q15, [src, #15*64]
add src, src, #16
qq_butterfly_botll \
v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, \
src, \
q8, q9, q10, q11, \
#8*64, #9*64, #10*64, #11*64, \
src, \
q12, q13, q14, q15, \
#12*64, #13*64, #14*64, #15*64
qq_butterfly_mix_rev v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v20, v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mix_rev v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v20, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3
qq_butterfly_mix_rev v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v20, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3
qq_butterfly_mix_rev v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v20, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1
qq_butterfly_mix_rev v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_butterfly_top v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_sub_add v16, v17, v18, v19, v28, v29, v30, v31, v0, v2, v4, v6, v8, v10, v12, v14
qq_sub_add v0, v2, v4, v6, v8, v10, v12, v14, v1, v3, v5, v7, v9, v11, v13, v15
mov v20.S[2], invNR2ph
mov v20.S[3], invNR2dp
qq_montgomery_mul v1, v3, v5, v7, v0, v2, v4, v6, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v0, v2, v4, v6, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov v20.S[2], invNWR2ph
mov v20.S[3], invNWR2dp
qq_montgomery_mul v9, v11, v13, v15, v8, v10, v12, v14, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v8, v10, v12, v14, v28, v29, v30, v31, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
sub counter, counter, #1
cbnz counter, _intt_top_loop
dup v29.4S, Q
dup v30.4S, Qhalf
dup v31.4S, nQhalf
cmge v18.4S, v31.4S, v0.4S
cmge v19.4S, v31.4S, v1.4S
cmge v16.4S, v0.4S, v30.4S
cmge v17.4S, v1.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v0.4S, v16.4S, v29.4S
mla v1.4S, v17.4S, v29.4S
str q0, [src, #0*64]
str q1, [src, #1*64]
cmge v18.4S, v31.4S, v2.4S
cmge v19.4S, v31.4S, v3.4S
cmge v16.4S, v2.4S, v30.4S
cmge v17.4S, v3.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v2.4S, v16.4S, v29.4S
mla v3.4S, v17.4S, v29.4S
str q2, [src, #2*64]
str q3, [src, #3*64]
cmge v18.4S, v31.4S, v4.4S
cmge v19.4S, v31.4S, v5.4S
cmge v16.4S, v4.4S, v30.4S
cmge v17.4S, v5.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v4.4S, v16.4S, v29.4S
mla v5.4S, v17.4S, v29.4S
str q4, [src, #4*64]
str q5, [src, #5*64]
cmge v18.4S, v31.4S, v6.4S
cmge v19.4S, v31.4S, v7.4S
cmge v16.4S, v6.4S, v30.4S
cmge v17.4S, v7.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v6.4S, v16.4S, v29.4S
mla v7.4S, v17.4S, v29.4S
str q6, [src, #6*64]
str q7, [src, #7*64]
cmge v18.4S, v31.4S, v8.4S
cmge v19.4S, v31.4S, v9.4S
cmge v16.4S, v8.4S, v30.4S
cmge v17.4S, v9.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v8.4S, v16.4S, v29.4S
mla v9.4S, v17.4S, v29.4S
str q8, [src, #8*64]
str q9, [src, #9*64]
cmge v18.4S, v31.4S, v10.4S
cmge v19.4S, v31.4S, v11.4S
cmge v16.4S, v10.4S, v30.4S
cmge v17.4S, v11.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v10.4S, v16.4S, v29.4S
mla v11.4S, v17.4S, v29.4S
str q10, [src, #10*64]
str q11, [src, #11*64]
cmge v18.4S, v31.4S, v12.4S
cmge v19.4S, v31.4S, v13.4S
cmge v16.4S, v12.4S, v30.4S
cmge v17.4S, v13.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v12.4S, v16.4S, v29.4S
mla v13.4S, v17.4S, v29.4S
str q12, [src, #12*64]
str q13, [src, #13*64]
cmge v18.4S, v31.4S, v14.4S
cmge v19.4S, v31.4S, v15.4S
cmge v16.4S, v14.4S, v30.4S
cmge v17.4S, v15.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v14.4S, v16.4S, v29.4S
mla v15.4S, v17.4S, v29.4S
str q14, [src, #14*64]
str q15, [src, #15*64]
add src, src, #16
.unreq Q
.unreq Qhalf
.unreq nQhalf
.unreq invNR2ph
.unreq invNR2dp
.unreq invNWR2ph
.unreq invNWR2dp
.unreq src
.unreq counter
pop_all
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_bot
.global _PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_bot
PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_bot:
_PQCLEAN_MLDSA87_AARCH64__asm_intt_SIMD_bot:
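// Inverse NTT, bottom part: the two halves at src0 and src1 = src0 + 512
// are processed together, each with its own twiddle table
// (table0/table1). The srshr #23 / mls pairs interleaved with the
// stores perform a rounding reduction: t = round(a / 2^23),
// a -= t * q (q = 8380417, roughly 2^23).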
push_all
Q .req w20
RphRdp .req x21
src0 .req x0
src1 .req x2
table0 .req x28
table1 .req x27
counter .req x19
ldr Q, [x2]
ldr RphRdp, [x2, #8]
add table0, x1, #128
add table1, table0, #1024
add src1, src0, #512
ldr q8, [table0, #4*16]
ldr q9, [table0, #5*16]
ldr q10, [table0, #6*16]
ldr q11, [table0, #7*16]
ldr q24, [table1, #4*16]
ldr q25, [table1, #5*16]
ldr q26, [table1, #6*16]
ldr q27, [table1, #7*16]
ldr q0, [src0, # 0*16]
ldr q1, [src0, # 1*16]
ldr q16, [src1, # 0*16]
ldr q17, [src1, # 1*16]
ldr q2, [src0, # 2*16]
ldr q3, [src0, # 3*16]
ldr q18, [src1, # 2*16]
ldr q19, [src1, # 3*16]
trn_4x4_l4 \
v0, v1, v2, v3, v12, v13, v14, v15, \
table0, \
q4, q5, q6, q7, \
#0*16, #1*16, #2*16, #3*16
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q20, q21, q22, q23, \
#0*16, #1*16, #2*16, #3*16
mov v4.S[0], Q
mov v20.D[0], RphRdp
dq_butterfly_vec_bot v0, v2, v12, v13, v1, v3, v4, v8, v9, v10, v11
dq_butterfly_vec_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v8, v9, v10, v11, v24, v25, v26, v27
dq_butterfly_vec_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v24, v25, v26, v27, v6, v7, v6, v7
dq_butterfly_vec_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v6, v7, v6, v7, v22, v23, v22, v23
mov counter, #7
_intt_bot_loop:
dq_butterfly_vec_top_ltrn_4x4 \
v28, v29, v18, v19, v4, v22, v23, v22, v23, \
table0, \
q8, q9, q10, q11, \
#(128+4*16), #(128+5*16), #(128+6*16), #(128+7*16), \
v0, v1, v2, v3, v12, v13, v14, v15
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q24, q25, q26, q27, \
#(128+4*16), #(128+5*16), #(128+6*16), #(128+7*16)
dq_butterfly_bot v0, v2, v12, v13, v1, v3, v4, v5, 0, 1, v5, 2, 3
dq_butterfly_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3
dq_butterfly_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v21, 0, 1, v21, 2, 3, v4, 2, 3, v4, 2, 3
dq_butterfly_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_top v16, v17, v28, v29, v18, v19, v4, v20, 2, 3, v20, 2, 3
str q2, [src0, # 2*16]
srshr v14.4S, v0.4S, #23
ldr q2, [src0, #(64+ 2*16)]
str q3, [src0, # 3*16]
srshr v15.4S, v1.4S, #23
ldr q3, [src0, #(64+ 3*16)]
str q18, [src1, # 2*16]
srshr v30.4S, v16.4S, #23
ldr q18, [src1, #(64+ 2*16)]
str q19, [src1, # 3*16]
srshr v31.4S, v17.4S, #23
ldr q19, [src1, #(64+ 3*16)]
mls v0.4S, v14.4S, v4.S[0]
str q0, [src0, # 0*16]
ldr q0, [src0, #(64+ 0*16)]
mls v1.4S, v15.4S, v4.S[0]
str q1, [src0, # 1*16]
ldr q1, [src0, #(64+ 1*16)]
mls v16.4S, v30.4S, v4.S[0]
str q16, [src1, # 0*16]
ldr q16, [src1, #(64+ 0*16)]
mls v17.4S, v31.4S, v4.S[0]
str q17, [src1, # 1*16]
ldr q17, [src1, #(64+ 1*16)]
add table0, table0, #128
add table1, table1, #128
add src0, src0, #64
add src1, src1, #64
trn_4x4_l4 \
v0, v1, v2, v3, v12, v13, v14, v15, \
table0, \
q4, q5, q6, q7, \
#0*16, #1*16, #2*16, #3*16
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q20, q21, q22, q23, \
#0*16, #1*16, #2*16, #3*16
mov v4.S[0], Q
mov v20.D[0], RphRdp
dq_butterfly_vec_bot v0, v2, v12, v13, v1, v3, v4, v8, v9, v10, v11
dq_butterfly_vec_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v8, v9, v10, v11, v24, v25, v26, v27
dq_butterfly_vec_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v24, v25, v26, v27, v6, v7, v6, v7
dq_butterfly_vec_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v6, v7, v6, v7, v22, v23, v22, v23
sub counter, counter, #1
cbnz counter, _intt_bot_loop
dq_butterfly_vec_top_trn_4x4 \
v16, v17, v28, v29, v18, v19, v4, v22, v23, v22, v23, \
v0, v1, v2, v3, v12, v13, v14, v15
trn_4x4 v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_bot v0, v2, v12, v13, v1, v3, v4, v5, 0, 1, v5, 2, 3
dq_butterfly_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3
dq_butterfly_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v21, 0, 1, v21, 2, 3, v4, 2, 3, v4, 2, 3
dq_butterfly_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_top v16, v17, v28, v29, v18, v19, v4, v20, 2, 3, v20, 2, 3
str q2, [src0, # 2*16]
str q3, [src0, # 3*16]
str q18, [src1, # 2*16]
str q19, [src1, # 3*16]
srshr v14.4S, v0.4S, #23
srshr v15.4S, v1.4S, #23
srshr v30.4S, v16.4S, #23
srshr v31.4S, v17.4S, #23
mls v0.4S, v14.4S, v4.S[0]
mls v1.4S, v15.4S, v4.S[0]
mls v16.4S, v30.4S, v4.S[0]
mls v17.4S, v31.4S, v4.S[0]
str q0, [src0, # 0*16]
str q1, [src0, # 1*16]
str q16, [src1, # 0*16]
str q17, [src1, # 1*16]
add table0, table0, #128
add table1, table1, #128
add src0, src0, #64
add src1, src1, #64
.unreq Q
.unreq RphRdp
.unreq src0
.unreq src1
.unreq table0
.unreq table1
.unreq counter
pop_all
ret
/* ==== File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/aarch64/__asm_NTT.S ==== */
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
#include "params.h"
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_top
.global _PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_top
PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_top:
_PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_top:
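// Forward NTT, top part: quad-register butterflies over sixteen
// 64-byte-strided rows, with loads and stores software-pipelined
// through the qq_butterfly_* macros. x0 = coefficients, x1 = twiddle
// table, x2 holds q.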
push_simd
Q .req w8
src .req x0
counter .req x11
ld1 {v20.4S, v21.4S, v22.4S, v23.4S}, [x1], #64
ld1 {v24.4S, v25.4S, v26.4S, v27.4S}, [x1], #64
ldr Q, [x2]
mov v20.S[0], Q
ldr q9, [src, #9*64]
ldr q11, [src, #11*64]
ldr q13, [src, #13*64]
ldr q15, [src, #15*64]
qq_butterfly_topl \
v9, v11, v13, v15, v16, v17, v18, v19, v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64
qq_butterfly_mixll \
v1, v3, v5, v7, v9, v11, v13, v15, v16, v17, v18, v19, \
v8, v10, v12, v14, v28, v29, v30, v31, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64
qq_butterfly_mix v0, v2, v4, v6, v8, v10, v12, v14, v28, v29, v30, v31, v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mixssl \
v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, \
v1, v3, v5, v7, v28, v29, v30, v31, \
v20, \
v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, \
src, \
q9, q11, q13, q15, \
#9*64, #11*64, #13*64, #15*64, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q9, q11, q13, q15, \
#(16+9*64), #(16+11*64), #(16+13*64), #(16+15*64)
mov counter, #3
_ntt_top_loop:
qq_butterfly_mixssl \
v0, v2, v4, v6, v1, v3, v5, v7, v28, v29, v30, v31, \
v9, v11, v13, v15, v16, v17, v18, v19, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64, \
src, \
q1, q3, q5, q7, \
#(16+1*64), #(16+3*64), #(16+5*64), #(16+7*64)
qq_butterfly_mixll \
v1, v3, v5, v7, v9, v11, v13, v15, v16, v17, v18, v19, \
v8, v10, v12, v14, v28, v29, v30, v31, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q8, q10, q12, q14, \
#(16+8*64), #(16+10*64), #(16+12*64), #(16+14*64), \
src, \
q0, q2, q4, q6, \
#(16+0*64), #(16+2*64), #(16+4*64), #(16+6*64)
add src, src, #16
qq_butterfly_mix v0, v2, v4, v6, v8, v10, v12, v14, v28, v29, v30, v31, v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mixssl \
v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, \
v1, v3, v5, v7, v28, v29, v30, v31, \
v20, \
v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, \
src, \
q9, q11, q13, q15, \
#9*64, #11*64, #13*64, #15*64, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q9, q11, q13, q15, \
#(16+9*64), #(16+11*64), #(16+13*64), #(16+15*64)
sub counter, counter, #1
cbnz counter, _ntt_top_loop
qq_butterfly_botss \
v0, v2, v4, v6, v1, v3, v5, v7, v28, v29, v30, v31, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64
.unreq Q
.unreq src
.unreq counter
pop_simd
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_bot
.global _PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_bot
PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_bot:
_PQCLEAN_MLDSA87_AARCH64__asm_ntt_SIMD_bot:
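// Forward NTT, bottom part: 4x4 transposes (trn_4x4*) rearrange
// coefficients so the remaining butterflies run within vectors, again
// using two twiddle tables for the two 512-byte halves.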
push_simd
Q .req w8
src .req x0
table0 .req x9
table1 .req x10
counter .req x11
ldr Q, [x2]
add table0, x1, #128
add table1, table0, #1024
ldr q0, [src, #0*16]
ldr q1, [src, #1*16]
ldr q2, [src, #2*16]
ldr q3, [src, #3*16]
ldr q4, [table0, #0*16]
ldr q5, [table0, #1*16]
ldr q20, [table1, #0*16]
ldr q21, [table1, #1*16]
dq_butterfly_topl4 \
v0, v1, v2, v3, v12, v13, v4, v4, 2, 3, v4, 2, 3, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
dq_butterfly_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v16, v18, v17, v19, v28, v29, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v0, v1, v2, v3, v12, v13, v14, v15
dq_butterfly_vec_top_trn_4x4 \
v0, v1, v2, v3, v12, v13, v4, v6, v7, v6, v7, \
v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_vec_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, v4, v8, v9, v10, v11, v24, v25, v26, v27
trn_4x4_l4 v0, v1, v2, v3, v8, v9, v10, v11, src, q12, q13, q14, q15, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q0, [src, #0*16]
str q2, [src, #2*16]
dq_butterfly_vec_bot v16, v18, v17, v19, v28, v29, v4, v24, v25, v26, v27
str q1, [src, #1*16]
str q3, [src, #3*16]
add src, src, #64
trn_4x4_l4 v16, v17, v18, v19, v24, v25, v26, v27, src, q28, q29, q30, q31, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v12, v13, v14, v15, v0, v1, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v28, v30, v29, v31, v16, v17, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
dq_butterfly_vec_top_trn_4x4 \
v12, v13, v14, v15, v0, v1, v4, v6, v7, v6, v7, \
v28, v29, v30, v31, v16, v17, v18, v19
dq_butterfly_vec_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, v4, v8, v9, v10, v11, v24, v25, v26, v27
mov counter, #3
_ntt_bot_loop:
trn_4x4_l4 v12, v13, v14, v15, v8, v9, v10, v11, src, q0, q1, q2, q3, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q12, [src, #0*16]
str q13, [src, #1*16]
dq_butterfly_vec_bot v28, v30, v29, v31, v16, v17, v4, v24, v25, v26, v27
str q14, [src, #2*16]
str q15, [src, #3*16]
add src, src, #64
trn_4x4_l4 v28, v29, v30, v31, v24, v25, v26, v27, src, q16, q17, q18, q19, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v0, v1, v2, v3, v12, v13, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q28, q29, q30, q31, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v16, v18, v17, v19, v28, v29, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v0, v1, v2, v3, v12, v13, v14, v15
dq_butterfly_vec_top_trn_4x4 \
v0, v1, v2, v3, v12, v13, v4, v6, v7, v6, v7, \
v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_vec_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, v4, v8, v9, v10, v11, v24, v25, v26, v27
trn_4x4_l4 v0, v1, v2, v3, v8, v9, v10, v11, src, q12, q13, q14, q15, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q0, [src, #0*16]
str q2, [src, #2*16]
dq_butterfly_vec_bot v16, v18, v17, v19, v28, v29, v4, v24, v25, v26, v27
str q1, [src, #1*16]
str q3, [src, #3*16]
add src, src, #64
trn_4x4_l4 v16, v17, v18, v19, v24, v25, v26, v27, src, q28, q29, q30, q31, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v12, v13, v14, v15, v0, v1, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v28, v30, v29, v31, v16, v17, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
dq_butterfly_vec_top_trn_4x4 \
v12, v13, v14, v15, v0, v1, v4, v6, v7, v6, v7, \
v28, v29, v30, v31, v16, v17, v18, v19
dq_butterfly_vec_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, v4, v8, v9, v10, v11, v24, v25, v26, v27
sub counter, counter, #1
cbnz counter, _ntt_bot_loop
dq_butterfly_vec_bot v28, v30, v29, v31, v16, v17, v4, v24, v25, v26, v27
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
trn_4x4_s4 v28, v29, v30, v31, v16, v17, v18, v19, src, q12, q13, q14, q15, #0*16, #1*16, #2*16, #3*16
str q28, [src, #(512+0*16)]
str q29, [src, #(512+1*16)]
str q30, [src, #(512+2*16)]
str q31, [src, #(512+3*16)]
add src, src, #64
.unreq Q
.unreq src
.unreq table0
.unreq table1
.unreq counter
pop_simd
ret
/* ==== File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-87/aarch64/__asm_poly.S ==== */
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
#include "params.h"
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_10_to_32
.global _PQCLEAN_MLDSA87_AARCH64__asm_10_to_32
PQCLEAN_MLDSA87_AARCH64__asm_10_to_32:
_PQCLEAN_MLDSA87_AARCH64__asm_10_to_32:
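// Unpack 10-bit packed values into 32-bit words: each iteration of the
// loop consumes five input words (160 bits) and emits sixteen
// coefficients, so 16 iterations produce all 256.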
mov x7, #16
_10_to_32_loop:
ldr w2, [x1], #4
ubfx w3, w2, #0, #10
str w3, [x0], #4
ubfx w4, w2, #10, #10
str w4, [x0], #4
ubfx w5, w2, #20, #10
str w5, [x0], #4
lsr w6, w2, #30
ldr w2, [x1], #4
ubfx w3, w2, #0, #8
lsl w3, w3, #2
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #8, #10
str w4, [x0], #4
ubfx w5, w2, #18, #10
str w5, [x0], #4
lsr w6, w2, #28
ldr w2, [x1], #4
ubfx w3, w2, #0, #6
lsl w3, w3, #4
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #6, #10
str w4, [x0], #4
ubfx w5, w2, #16, #10
str w5, [x0], #4
lsr w6, w2, #26
ldr w2, [x1], #4
ubfx w3, w2, #0, #4
lsl w3, w3, #6
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #4, #10
str w4, [x0], #4
ubfx w5, w2, #14, #10
str w5, [x0], #4
lsr w6, w2, #24
ldr w2, [x1], #4
ubfx w3, w2, #0, #2
lsl w3, w3, #8
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #2, #10
str w4, [x0], #4
ubfx w5, w2, #12, #10
str w5, [x0], #4
ubfx w6, w2, #22, #10
str w6, [x0], #4
sub x7, x7, #1
cbnz x7, _10_to_32_loop
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_reduce
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_reduce
PQCLEAN_MLDSA87_AARCH64__asm_poly_reduce:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_reduce:
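// poly_reduce: rounding reduction of every coefficient,
// t = (a + 2^22) >> 23 (srshr), then a -= t * q with q = *x1 broadcast
// into v24. Loads and stores are software-pipelined around the loop.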
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #7
_poly_reduce_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_reduce_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_caddq
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_caddq
PQCLEAN_MLDSA87_AARCH64__asm_poly_caddq:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_caddq:
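// poly_caddq: add q to negative coefficients only. sshr #31 gives an
// all-ones mask for negative lanes, and mls computes a -= mask * q,
// i.e. a += q exactly where the mask is set.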
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
sshr v16.4S, v0.4S, #31
ld1 { v5.4S}, [x1], #16
sshr v17.4S, v1.4S, #31
ld1 { v6.4S}, [x1], #16
sshr v18.4S, v2.4S, #31
ld1 { v7.4S}, [x1], #16
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #7
_poly_caddq_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
sshr v16.4S, v0.4S, #31
ld1 { v5.4S}, [x1], #16
sshr v17.4S, v1.4S, #31
ld1 { v6.4S}, [x1], #16
sshr v18.4S, v2.4S, #31
ld1 { v7.4S}, [x1], #16
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_caddq_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_freeze
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_freeze
PQCLEAN_MLDSA87_AARCH64__asm_poly_freeze:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_freeze:
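// poly_freeze: rounding reduction (srshr #23 / mls) followed by a
// conditional add of q (sshr #31 / mls), leaving each coefficient
// in [0, q).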
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
sshr v16.4S, v0.4S, #31
mls v5.4S, v21.4S, v24.4S
sshr v17.4S, v1.4S, #31
mls v6.4S, v22.4S, v24.4S
sshr v18.4S, v2.4S, #31
mls v7.4S, v23.4S, v24.4S
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #8
_poly_freeze_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
sshr v16.4S, v0.4S, #31
mls v5.4S, v21.4S, v24.4S
sshr v17.4S, v1.4S, #31
mls v6.4S, v22.4S, v24.4S
sshr v18.4S, v2.4S, #31
mls v7.4S, v23.4S, v24.4S
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_freeze_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_power2round
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_power2round
PQCLEAN_MLDSA87_AARCH64__asm_poly_power2round:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_power2round:
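// poly_power2round: split a = a1 * 2^13 + a0 with a centered low part.
// a1 = (a - 1 + 2^12) >> 13 (sub then srshr #13), a0 = a - (a1 << 13);
// a1 is written to x0, a0 to x1, and the input is read from x2.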
mov w4, #1
dup v28.4S, w4
ld1 { v0.4S}, [x2], #16
ld1 { v1.4S}, [x2], #16
ld1 { v2.4S}, [x2], #16
ld1 { v3.4S}, [x2], #16
ld1 {v20.4S}, [x2], #16
sub v4.4S, v0.4S, v28.4S
ld1 {v21.4S}, [x2], #16
sub v5.4S, v1.4S, v28.4S
ld1 {v22.4S}, [x2], #16
sub v6.4S, v2.4S, v28.4S
ld1 {v23.4S}, [x2], #16
sub v7.4S, v3.4S, v28.4S
sub v24.4S, v20.4S, v28.4S
srshr v16.4S, v4.4S, #13
sub v25.4S, v21.4S, v28.4S
srshr v17.4S, v5.4S, #13
sub v26.4S, v22.4S, v28.4S
srshr v18.4S, v6.4S, #13
sub v27.4S, v23.4S, v28.4S
srshr v19.4S, v7.4S, #13
srshr v28.4S, v24.4S, #13
st1 {v16.4S}, [x0], #16
srshr v29.4S, v25.4S, #13
st1 {v17.4S}, [x0], #16
srshr v30.4S, v26.4S, #13
st1 {v18.4S}, [x0], #16
srshr v31.4S, v27.4S, #13
st1 {v19.4S}, [x0], #16
st1 {v28.4S}, [x0], #16
shl v4.4S, v16.4S, #13
st1 {v29.4S}, [x0], #16
shl v5.4S, v17.4S, #13
st1 {v30.4S}, [x0], #16
shl v6.4S, v18.4S, #13
st1 {v31.4S}, [x0], #16
shl v7.4S, v19.4S, #13
shl v24.4S, v28.4S, #13
sub v16.4S, v0.4S, v4.4S
shl v25.4S, v29.4S, #13
sub v17.4S, v1.4S, v5.4S
shl v26.4S, v30.4S, #13
sub v18.4S, v2.4S, v6.4S
shl v27.4S, v31.4S, #13
sub v19.4S, v3.4S, v7.4S
sub v28.4S, v20.4S, v24.4S
st1 {v16.4S}, [x1], #16
sub v29.4S, v21.4S, v25.4S
st1 {v17.4S}, [x1], #16
sub v30.4S, v22.4S, v26.4S
st1 {v18.4S}, [x1], #16
sub v31.4S, v23.4S, v27.4S
st1 {v19.4S}, [x1], #16
mov x16, #7
_poly_power2round_loop:
st1 {v28.4S}, [x1], #16
dup v28.4S, w4
ld1 { v0.4S}, [x2], #16
st1 {v29.4S}, [x1], #16
ld1 { v1.4S}, [x2], #16
st1 {v30.4S}, [x1], #16
ld1 { v2.4S}, [x2], #16
st1 {v31.4S}, [x1], #16
ld1 { v3.4S}, [x2], #16
ld1 {v20.4S}, [x2], #16
sub v4.4S, v0.4S, v28.4S
ld1 {v21.4S}, [x2], #16
sub v5.4S, v1.4S, v28.4S
ld1 {v22.4S}, [x2], #16
sub v6.4S, v2.4S, v28.4S
ld1 {v23.4S}, [x2], #16
sub v7.4S, v3.4S, v28.4S
sub v24.4S, v20.4S, v28.4S
srshr v16.4S, v4.4S, #13
sub v25.4S, v21.4S, v28.4S
srshr v17.4S, v5.4S, #13
sub v26.4S, v22.4S, v28.4S
srshr v18.4S, v6.4S, #13
sub v27.4S, v23.4S, v28.4S
srshr v19.4S, v7.4S, #13
srshr v28.4S, v24.4S, #13
st1 {v16.4S}, [x0], #16
srshr v29.4S, v25.4S, #13
st1 {v17.4S}, [x0], #16
srshr v30.4S, v26.4S, #13
st1 {v18.4S}, [x0], #16
srshr v31.4S, v27.4S, #13
st1 {v19.4S}, [x0], #16
st1 {v28.4S}, [x0], #16
shl v4.4S, v16.4S, #13
st1 {v29.4S}, [x0], #16
shl v5.4S, v17.4S, #13
st1 {v30.4S}, [x0], #16
shl v6.4S, v18.4S, #13
st1 {v31.4S}, [x0], #16
shl v7.4S, v19.4S, #13
shl v24.4S, v28.4S, #13
sub v16.4S, v0.4S, v4.4S
shl v25.4S, v29.4S, #13
sub v17.4S, v1.4S, v5.4S
shl v26.4S, v30.4S, #13
sub v18.4S, v2.4S, v6.4S
shl v27.4S, v31.4S, #13
sub v19.4S, v3.4S, v7.4S
sub v28.4S, v20.4S, v24.4S
st1 {v16.4S}, [x1], #16
sub v29.4S, v21.4S, v25.4S
st1 {v17.4S}, [x1], #16
sub v30.4S, v22.4S, v26.4S
st1 {v18.4S}, [x1], #16
sub v31.4S, v23.4S, v27.4S
st1 {v19.4S}, [x1], #16
sub x16, x16, #1
cbnz x16, _poly_power2round_loop
st1 {v28.4S}, [x1], #16
st1 {v29.4S}, [x1], #16
st1 {v30.4S}, [x1], #16
st1 {v31.4S}, [x1], #16
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_add
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_add
PQCLEAN_MLDSA87_AARCH64__asm_poly_add:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_add:
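// poly_add: coefficient-wise c = a + b with x0 = c, x1 = a, x2 = b,
// four 4-lane vectors per iteration.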
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
add v16.4S, v0.4S, v4.4S
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
add v17.4S, v1.4S, v5.4S
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
add v18.4S, v2.4S, v6.4S
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
add v19.4S, v3.4S, v7.4S
mov x16, #15
_poly_add_loop:
st1 {v16.4S}, [x0], #16
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
add v16.4S, v0.4S, v4.4S
st1 {v17.4S}, [x0], #16
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
add v17.4S, v1.4S, v5.4S
st1 {v18.4S}, [x0], #16
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
add v18.4S, v2.4S, v6.4S
st1 {v19.4S}, [x0], #16
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
add v19.4S, v3.4S, v7.4S
sub x16, x16, #1
cbnz x16, _poly_add_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_sub
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_sub
PQCLEAN_MLDSA87_AARCH64__asm_poly_sub:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_sub:
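// poly_sub: as poly_add above, but c = a - b.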
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
sub v16.4S, v0.4S, v4.4S
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
sub v17.4S, v1.4S, v5.4S
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
sub v18.4S, v2.4S, v6.4S
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
sub v19.4S, v3.4S, v7.4S
mov x16, #15
_poly_sub_loop:
st1 {v16.4S}, [x0], #16
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
sub v16.4S, v0.4S, v4.4S
st1 {v17.4S}, [x0], #16
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
sub v17.4S, v1.4S, v5.4S
st1 {v18.4S}, [x0], #16
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
sub v18.4S, v2.4S, v6.4S
st1 {v19.4S}, [x0], #16
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
sub v19.4S, v3.4S, v7.4S
sub x16, x16, #1
cbnz x16, _poly_sub_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_shiftl
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_shiftl
PQCLEAN_MLDSA87_AARCH64__asm_poly_shiftl:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_shiftl:
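// poly_shiftl: in-place left shift of every coefficient by 13 bits.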
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
shl v16.4S, v0.4S, #13
ld1 { v1.4S}, [x1], #16
shl v17.4S, v1.4S, #13
ld1 { v2.4S}, [x1], #16
shl v18.4S, v2.4S, #13
ld1 { v3.4S}, [x1], #16
shl v19.4S, v3.4S, #13
ld1 { v4.4S}, [x1], #16
shl v20.4S, v4.4S, #13
ld1 { v5.4S}, [x1], #16
shl v21.4S, v5.4S, #13
ld1 { v6.4S}, [x1], #16
shl v22.4S, v6.4S, #13
ld1 { v7.4S}, [x1], #16
shl v23.4S, v7.4S, #13
mov x16, #7
_poly_shiftl_loop:
st1 {v16.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
shl v16.4S, v0.4S, #13
st1 {v17.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
shl v17.4S, v1.4S, #13
st1 {v18.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
shl v18.4S, v2.4S, #13
st1 {v19.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
shl v19.4S, v3.4S, #13
st1 {v20.4S}, [x0], #16
ld1 { v4.4S}, [x1], #16
shl v20.4S, v4.4S, #13
st1 {v21.4S}, [x0], #16
ld1 { v5.4S}, [x1], #16
shl v21.4S, v5.4S, #13
st1 {v22.4S}, [x0], #16
ld1 { v6.4S}, [x1], #16
shl v22.4S, v6.4S, #13
st1 {v23.4S}, [x0], #16
ld1 { v7.4S}, [x1], #16
shl v23.4S, v7.4S, #13
sub x16, x16, #1
cbnz x16, _poly_shiftl_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
st1 {v20.4S}, [x0], #16
st1 {v21.4S}, [x0], #16
st1 {v22.4S}, [x0], #16
st1 {v23.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_poly_pointwise_montgomery
.global _PQCLEAN_MLDSA87_AARCH64__asm_poly_pointwise_montgomery
PQCLEAN_MLDSA87_AARCH64__asm_poly_pointwise_montgomery:
_PQCLEAN_MLDSA87_AARCH64__asm_poly_pointwise_montgomery:
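// poly_pointwise_montgomery: c = a * b * 2^-32 mod q. smull/smull2
// form the 64-bit products t; uzp1 gathers their low 32-bit halves;
// m = lo(t) * w21 (the Montgomery factor read from x3 + 4, chosen so
// that t + m * q is divisible by 2^32); smlal adds m * q (w20 = q,
// read from x3); uzp2 then keeps the high halves as the results.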
push_all
ldr w20, [x3, #0]
ldr w21, [x3, #4]
dup v30.4S, w20
dup v31.4S, w21
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
mov x16, #15
_poly_pointwise_montgomery_loop:
st1 {v24.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 {v25.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 {v26.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 {v27.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
sub x16, x16, #1
cbnz x16, _poly_pointwise_montgomery_loop
st1 {v24.4S}, [x0], #16
st1 {v25.4S}, [x0], #16
st1 {v26.4S}, [x0], #16
st1 {v27.4S}, [x0], #16
pop_all
ret
.align 2
.global PQCLEAN_MLDSA87_AARCH64__asm_polyvecl_pointwise_acc_montgomery
.global _PQCLEAN_MLDSA87_AARCH64__asm_polyvecl_pointwise_acc_montgomery
PQCLEAN_MLDSA87_AARCH64__asm_polyvecl_pointwise_acc_montgomery:
_PQCLEAN_MLDSA87_AARCH64__asm_polyvecl_pointwise_acc_montgomery:
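// polyvecl_pointwise_acc_montgomery: accumulate the pointwise products
// of L polynomial pairs (base rows at x1/x2, further rows at the
// 1024-byte strides set up below) in 64-bit lanes, then apply a single
// Montgomery reduction per output vector. The L > 4 and L > 5 guards
// pull in the extra rows for the larger parameter sets.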
push_all
ldr w20, [x3, #0]
ldr w21, [x3, #4]
add x5, x1, #1024*1
add x6, x2, #1024*1
add x7, x1, #1024*2
add x8, x2, #1024*2
add x9, x1, #1024*3
add x10, x2, #1024*3
#if L > 4
add x11, x1, #1024*4
add x12, x2, #1024*4
#endif
#if L > 5
add x13, x11, #1024*1
add x14, x12, #1024*1
add x15, x11, #1024*2
add x19, x12, #1024*2
#endif
dup v30.4S, w20
dup v31.4S, w21
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x5], #16
ld1 { v4.4S}, [ x6], #16
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x5], #16
ld1 { v5.4S}, [ x6], #16
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x5], #16
ld1 { v6.4S}, [ x6], #16
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x5], #16
ld1 { v7.4S}, [ x6], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x7], #16
ld1 { v4.4S}, [ x8], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x7], #16
ld1 { v5.4S}, [ x8], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x7], #16
ld1 { v6.4S}, [ x8], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x7], #16
ld1 { v7.4S}, [ x8], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x9], #16
ld1 { v4.4S}, [x10], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x9], #16
ld1 { v5.4S}, [x10], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x9], #16
ld1 { v6.4S}, [x10], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x9], #16
ld1 { v7.4S}, [x10], #16
#if L > 4
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x11], #16
ld1 { v4.4S}, [x12], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x11], #16
ld1 { v5.4S}, [x12], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x11], #16
ld1 { v6.4S}, [x12], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x11], #16
ld1 { v7.4S}, [x12], #16
#endif
#if L > 5
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x13], #16
ld1 { v4.4S}, [x14], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x13], #16
ld1 { v5.4S}, [x14], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x13], #16
ld1 { v6.4S}, [x14], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x13], #16
ld1 { v7.4S}, [x14], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x15], #16
ld1 { v4.4S}, [x19], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x15], #16
ld1 { v5.4S}, [x19], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x15], #16
ld1 { v6.4S}, [x19], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x15], #16
ld1 { v7.4S}, [x19], #16
#endif
mov x16, #15
_polyvecl_pointwise_acc_montgomery_loop:
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
ld1 { v0.4S}, [x1], #16
uzp1 v21.4S, v13.4S, v17.4S
ld1 { v1.4S}, [x1], #16
uzp1 v22.4S, v14.4S, v18.4S
ld1 { v2.4S}, [x1], #16
uzp1 v23.4S, v15.4S, v19.4S
ld1 { v3.4S}, [x1], #16
mul v24.4S, v20.4S, v31.4S
ld1 { v4.4S}, [x2], #16
mul v25.4S, v21.4S, v31.4S
ld1 { v5.4S}, [x2], #16
mul v26.4S, v22.4S, v31.4S
ld1 { v6.4S}, [x2], #16
mul v27.4S, v23.4S, v31.4S
ld1 { v7.4S}, [x2], #16
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x5], #16
st1 {v24.4S}, [x0], #16
ld1 { v4.4S}, [ x6], #16
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x5], #16
st1 {v25.4S}, [x0], #16
ld1 { v5.4S}, [ x6], #16
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x5], #16
st1 {v26.4S}, [x0], #16
ld1 { v6.4S}, [ x6], #16
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x5], #16
st1 {v27.4S}, [x0], #16
ld1 { v7.4S}, [ x6], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x7], #16
ld1 { v4.4S}, [ x8], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x7], #16
ld1 { v5.4S}, [ x8], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x7], #16
ld1 { v6.4S}, [ x8], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x7], #16
ld1 { v7.4S}, [ x8], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x9], #16
ld1 { v4.4S}, [x10], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x9], #16
ld1 { v5.4S}, [x10], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x9], #16
ld1 { v6.4S}, [x10], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x9], #16
ld1 { v7.4S}, [x10], #16
#if L > 4
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x11], #16
ld1 { v4.4S}, [x12], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x11], #16
ld1 { v5.4S}, [x12], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x11], #16
ld1 { v6.4S}, [x12], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x11], #16
ld1 { v7.4S}, [x12], #16
#endif
#if L > 5
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x13], #16
ld1 { v4.4S}, [x14], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x13], #16
ld1 { v5.4S}, [x14], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x13], #16
ld1 { v6.4S}, [x14], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x13], #16
ld1 { v7.4S}, [x14], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x15], #16
ld1 { v4.4S}, [x19], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x15], #16
ld1 { v5.4S}, [x19], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x15], #16
ld1 { v6.4S}, [x19], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x15], #16
ld1 { v7.4S}, [x19], #16
#endif
sub x16, x16, #1
cbnz x16, _polyvecl_pointwise_acc_montgomery_loop
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
st1 {v24.4S}, [x0], #16
st1 {v25.4S}, [x0], #16
st1 {v26.4S}, [x0], #16
st1 {v27.4S}, [x0], #16
pop_all
ret
/* ==== File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/sphincs-shake-192s-simple/aarch64/f1600x2.s ==== */
# From https://github.com/bwesterb/armed-keccak
.macro round
# Execute theta, but without xoring into the state yet.
# Compute parities p[i] = a[i] ^ a[5+i] ^ ... ^ a[20+i].
eor3.16b v25, v0, v5, v10
eor3.16b v26, v1, v6, v11
eor3.16b v27, v2, v7, v12
eor3.16b v28, v3, v8, v13
eor3.16b v29, v4, v9, v14
eor3.16b v25, v25, v15, v20
eor3.16b v26, v26, v16, v21
eor3.16b v27, v27, v17, v22
eor3.16b v28, v28, v18, v23
eor3.16b v29, v29, v19, v24
# d[0] = rotl(p[1], 1) ^ p[4]
rax1.2d v30, v29, v26
# d[3] = rotl(p[4], 1) ^ p[2]
rax1.2d v29, v27, v29
# d[1] = rotl(p[2], 1) ^ p[0]
rax1.2d v27, v25, v27
# d[4] = rotl(p[0], 1) ^ p[3]
rax1.2d v25, v28, v25
# d[2] = rotl(p[3], 1) ^ p[1]
rax1.2d v28, v26, v28
# Xor parities from step theta into the state at the same time
# as executing rho and pi.
eor.16b v0, v0, v30
mov.16b v31, v1
xar.2d v1, v6, v27, 20
xar.2d v6, v9, v25, 44
xar.2d v9, v22, v28, 3
xar.2d v22, v14, v25, 25
xar.2d v14, v20, v30, 46
xar.2d v20, v2, v28, 2
xar.2d v2, v12, v28, 21
xar.2d v12, v13, v29, 39
xar.2d v13, v19, v25, 56
xar.2d v19, v23, v29, 8
xar.2d v23, v15, v30, 23
xar.2d v15, v4, v25, 37
xar.2d v4, v24, v25, 50
xar.2d v24, v21, v27, 62
xar.2d v21, v8, v29, 9
xar.2d v8, v16, v27, 19
xar.2d v16, v5, v30, 28
xar.2d v5, v3, v29, 36
xar.2d v3, v18, v29, 43
xar.2d v18, v17, v28, 49
xar.2d v17, v11, v27, 54
xar.2d v11, v7, v28, 58
xar.2d v7, v10, v30, 61
xar.2d v10, v31, v27, 63
# Chi: a[x] ^= ~a[x+1] & a[x+2] within each row;
# bcax d,n,m,a computes n ^ (m & ~a) in a single instruction.
bcax.16b v25, v0, v2, v1
bcax.16b v26, v1, v3, v2
bcax.16b v2, v2, v4, v3
bcax.16b v3, v3, v0, v4
bcax.16b v4, v4, v1, v0
mov.16b v0, v25
mov.16b v1, v26
bcax.16b v25, v5, v7, v6
bcax.16b v26, v6, v8, v7
bcax.16b v7, v7, v9, v8
bcax.16b v8, v8, v5, v9
bcax.16b v9, v9, v6, v5
mov.16b v5, v25
mov.16b v6, v26
bcax.16b v25, v10, v12, v11
bcax.16b v26, v11, v13, v12
bcax.16b v12, v12, v14, v13
bcax.16b v13, v13, v10, v14
bcax.16b v14, v14, v11, v10
mov.16b v10, v25
mov.16b v11, v26
bcax.16b v25, v15, v17, v16
bcax.16b v26, v16, v18, v17
bcax.16b v17, v17, v19, v18
bcax.16b v18, v18, v15, v19
bcax.16b v19, v19, v16, v15
mov.16b v15, v25
mov.16b v16, v26
bcax.16b v25, v20, v22, v21
bcax.16b v26, v21, v23, v22
bcax.16b v22, v22, v24, v23
bcax.16b v23, v23, v20, v24
bcax.16b v24, v24, v21, v20
mov.16b v20, v25
mov.16b v21, v26
# Iota: load the round constant replicated into both lanes and xor it
# into a[0]; x1 steps 8 bytes per round through the 24 constants.
ld1r {v25.2d}, [x1], #8
eor.16b v0, v0, v25
.endm
.align 4
.global __f1600x2
__f1600x2:
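# x0: pointer to the interleaved two-way state (25 lanes x 16 bytes);
# x1: pointer to the 24 round constants consumed by the iota step.
# AAPCS64 only requires the low halves d8-d15 to be preserved, hence the
# stp/ldp spills around the use of the full vector register file.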
stp d8, d9, [sp,#-16]!
stp d10, d11, [sp,#-16]!
stp d12, d13, [sp,#-16]!
stp d14, d15, [sp,#-16]!
mov x2, x0
mov x3, #24
ld1.2d {v0, v1, v2, v3}, [x0], #64
ld1.2d {v4, v5, v6, v7}, [x0], #64
ld1.2d {v8, v9, v10, v11}, [x0], #64
ld1.2d {v12, v13, v14, v15}, [x0], #64
ld1.2d {v16, v17, v18, v19}, [x0], #64
ld1.2d {v20, v21, v22, v23}, [x0], #64
ld1.2d {v24}, [x0]
loop:
round
subs x3, x3, #1
cbnz x3, loop
mov x0, x2
st1.2d {v0, v1, v2, v3}, [x0], #64
st1.2d {v4, v5, v6, v7}, [x0], #64
st1.2d {v8, v9, v10, v11}, [x0], #64
st1.2d {v12, v13, v14, v15}, [x0], #64
st1.2d {v16, v17, v18, v19}, [x0], #64
st1.2d {v20, v21, v22, v23}, [x0], #64
st1.2d {v24}, [x0]
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ret lr
|
mktmansour/MKT-KSA-Geolocation-Security
| 4,064
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/avx2/pointwise.S
|
#include "params.h"
#include "cdecl.h"
.text
.global cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_avx)
.global _cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_avx)
cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_avx):
_cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_avx):
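#%rdi: output polynomial c, %rsi/%rdx: input polynomials a and b,
#%rcx: table of broadcast constants indexed by _8XQ/_8XQINV.
#Computes the coefficient-wise product c = a*b with Montgomery reduction;
#vpmuldq only multiplies the even 32-bit lanes, so odd lanes are shifted
#down first and the halves are re-interleaved with vpblendd when storing.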
#consts
vmovdqa _8XQINV*4(%rcx),%ymm0
vmovdqa _8XQ*4(%rcx),%ymm1
xor %eax,%eax
_looptop1:
#load
vmovdqa (%rsi),%ymm2
vmovdqa 32(%rsi),%ymm4
vmovdqa 64(%rsi),%ymm6
vmovdqa (%rdx),%ymm10
vmovdqa 32(%rdx),%ymm12
vmovdqa 64(%rdx),%ymm14
vpsrlq $32,%ymm2,%ymm3
vpsrlq $32,%ymm4,%ymm5
vmovshdup %ymm6,%ymm7
vpsrlq $32,%ymm10,%ymm11
vpsrlq $32,%ymm12,%ymm13
vmovshdup %ymm14,%ymm15
#mul
vpmuldq %ymm2,%ymm10,%ymm2
vpmuldq %ymm3,%ymm11,%ymm3
vpmuldq %ymm4,%ymm12,%ymm4
vpmuldq %ymm5,%ymm13,%ymm5
vpmuldq %ymm6,%ymm14,%ymm6
vpmuldq %ymm7,%ymm15,%ymm7
#reduce (signed Montgomery: m = low32(t*qinv), r = (t - m*q) >> 32)
vpmuldq %ymm0,%ymm2,%ymm10
vpmuldq %ymm0,%ymm3,%ymm11
vpmuldq %ymm0,%ymm4,%ymm12
vpmuldq %ymm0,%ymm5,%ymm13
vpmuldq %ymm0,%ymm6,%ymm14
vpmuldq %ymm0,%ymm7,%ymm15
vpmuldq %ymm1,%ymm10,%ymm10
vpmuldq %ymm1,%ymm11,%ymm11
vpmuldq %ymm1,%ymm12,%ymm12
vpmuldq %ymm1,%ymm13,%ymm13
vpmuldq %ymm1,%ymm14,%ymm14
vpmuldq %ymm1,%ymm15,%ymm15
vpsubq %ymm10,%ymm2,%ymm2
vpsubq %ymm11,%ymm3,%ymm3
vpsubq %ymm12,%ymm4,%ymm4
vpsubq %ymm13,%ymm5,%ymm5
vpsubq %ymm14,%ymm6,%ymm6
vpsubq %ymm15,%ymm7,%ymm7
vpsrlq $32,%ymm2,%ymm2
vpsrlq $32,%ymm4,%ymm4
vmovshdup %ymm6,%ymm6
#store
vpblendd $0xAA,%ymm3,%ymm2,%ymm2
vpblendd $0xAA,%ymm5,%ymm4,%ymm4
vpblendd $0xAA,%ymm7,%ymm6,%ymm6
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
vmovdqa %ymm6,64(%rdi)
add $96,%rdi
add $96,%rsi
add $96,%rdx
add $1,%eax
cmp $10,%eax
jb _looptop1
vmovdqa (%rsi),%ymm2
vmovdqa 32(%rsi),%ymm4
vmovdqa (%rdx),%ymm10
vmovdqa 32(%rdx),%ymm12
vpsrlq $32,%ymm2,%ymm3
vpsrlq $32,%ymm4,%ymm5
vmovshdup %ymm10,%ymm11
vmovshdup %ymm12,%ymm13
#mul
vpmuldq %ymm2,%ymm10,%ymm2
vpmuldq %ymm3,%ymm11,%ymm3
vpmuldq %ymm4,%ymm12,%ymm4
vpmuldq %ymm5,%ymm13,%ymm5
#reduce
vpmuldq %ymm0,%ymm2,%ymm10
vpmuldq %ymm0,%ymm3,%ymm11
vpmuldq %ymm0,%ymm4,%ymm12
vpmuldq %ymm0,%ymm5,%ymm13
vpmuldq %ymm1,%ymm10,%ymm10
vpmuldq %ymm1,%ymm11,%ymm11
vpmuldq %ymm1,%ymm12,%ymm12
vpmuldq %ymm1,%ymm13,%ymm13
vpsubq %ymm10,%ymm2,%ymm2
vpsubq %ymm11,%ymm3,%ymm3
vpsubq %ymm12,%ymm4,%ymm4
vpsubq %ymm13,%ymm5,%ymm5
vpsrlq $32,%ymm2,%ymm2
vmovshdup %ymm4,%ymm4
#store
vpblendd $0x55,%ymm2,%ymm3,%ymm2
vpblendd $0x55,%ymm4,%ymm5,%ymm4
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
ret
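#Helper macros for the accumulated product below: `pointwise off` loads
#one 64-byte block pair at byte offset \off and forms the 64-bit lane
#products; `acc` adds them onto the running accumulators ymm2-ymm5.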
.macro pointwise off
#load
vmovdqa \off(%rsi),%ymm6
vmovdqa \off+32(%rsi),%ymm8
vmovdqa \off(%rdx),%ymm10
vmovdqa \off+32(%rdx),%ymm12
vpsrlq $32,%ymm6,%ymm7
vpsrlq $32,%ymm8,%ymm9
vmovshdup %ymm10,%ymm11
vmovshdup %ymm12,%ymm13
#mul
vpmuldq %ymm6,%ymm10,%ymm6
vpmuldq %ymm7,%ymm11,%ymm7
vpmuldq %ymm8,%ymm12,%ymm8
vpmuldq %ymm9,%ymm13,%ymm9
.endm
.macro acc
vpaddq %ymm6,%ymm2,%ymm2
vpaddq %ymm7,%ymm3,%ymm3
vpaddq %ymm8,%ymm4,%ymm4
vpaddq %ymm9,%ymm5,%ymm5
.endm
.global cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_acc_avx)
.global _cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_acc_avx)
cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_acc_avx):
_cdecl(PQCLEAN_MLDSA44_AVX2_pointwise_acc_avx):
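#Inner product over the length-4 vectors of ML-DSA-44: the four summands
#sit 1024 bytes apart (one 256-coefficient polynomial each), and a single
#Montgomery reduction is applied after all four products are accumulated.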
#consts
vmovdqa _8XQINV*4(%rcx),%ymm0
vmovdqa _8XQ*4(%rcx),%ymm1
xor %eax,%eax
_looptop2:
pointwise 0
#mov
vmovdqa %ymm6,%ymm2
vmovdqa %ymm7,%ymm3
vmovdqa %ymm8,%ymm4
vmovdqa %ymm9,%ymm5
pointwise 1024
acc
pointwise 2048
acc
pointwise 3072
acc
#reduce
vpmuldq %ymm0,%ymm2,%ymm6
vpmuldq %ymm0,%ymm3,%ymm7
vpmuldq %ymm0,%ymm4,%ymm8
vpmuldq %ymm0,%ymm5,%ymm9
vpmuldq %ymm1,%ymm6,%ymm6
vpmuldq %ymm1,%ymm7,%ymm7
vpmuldq %ymm1,%ymm8,%ymm8
vpmuldq %ymm1,%ymm9,%ymm9
vpsubq %ymm6,%ymm2,%ymm2
vpsubq %ymm7,%ymm3,%ymm3
vpsubq %ymm8,%ymm4,%ymm4
vpsubq %ymm9,%ymm5,%ymm5
vpsrlq $32,%ymm2,%ymm2
vmovshdup %ymm4,%ymm4
#store
vpblendd $0xAA,%ymm3,%ymm2,%ymm2
vpblendd $0xAA,%ymm5,%ymm4,%ymm4
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm4,32(%rdi)
add $64,%rsi
add $64,%rdx
add $64,%rdi
add $1,%eax
cmp $16,%eax
jb _looptop2
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 1,092
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/avx2/shuffle.S
|
#include "cdecl.h"
.include "shuffle.inc"
.text
nttunpack128_avx:
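#Transposes eight YMM registers (8 coefficients each) to undo the
#register-tiled coefficient ordering used by the vectorized NTT, via the
#shuffle8/shuffle4/shuffle2 helpers from shuffle.inc.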
#load
vmovdqa (%rdi),%ymm4
vmovdqa 32(%rdi),%ymm5
vmovdqa 64(%rdi),%ymm6
vmovdqa 96(%rdi),%ymm7
vmovdqa 128(%rdi),%ymm8
vmovdqa 160(%rdi),%ymm9
vmovdqa 192(%rdi),%ymm10
vmovdqa 224(%rdi),%ymm11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
#store
vmovdqa %ymm9,(%rdi)
vmovdqa %ymm8,32(%rdi)
vmovdqa %ymm7,64(%rdi)
vmovdqa %ymm6,96(%rdi)
vmovdqa %ymm5,128(%rdi)
vmovdqa %ymm4,160(%rdi)
vmovdqa %ymm3,192(%rdi)
vmovdqa %ymm11,224(%rdi)
ret
.global cdecl(PQCLEAN_MLDSA44_AVX2_nttunpack_avx)
.global _cdecl(PQCLEAN_MLDSA44_AVX2_nttunpack_avx)
cdecl(PQCLEAN_MLDSA44_AVX2_nttunpack_avx):
_cdecl(PQCLEAN_MLDSA44_AVX2_nttunpack_avx):
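#A polynomial is 256 coefficients of 4 bytes; each of the four calls
#below unpacks one 256-byte block of 64 coefficients.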
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
add $256,%rdi
call nttunpack128_avx
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 26,021
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/avx2/f1600x4.S
|
/* Taken from Bas Westerbaan's new 4-way SHAKE implementation
* for Sphincs+ (https://github.com/sphincs/sphincsplus/pull/14/),
 * but using vpshufb for byte-granular rotations, as in the Keccak Code Package. */
#include "cdecl.h"
.data
.p2align 5
rho8:
.byte 7,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14,7,0,1,2,3,4,5,6,15,8,9,10,11,12,13,14
rho56:
.byte 1,2,3,4,5,6,7,0,9,10,11,12,13,14,15,8,1,2,3,4,5,6,7,0,9,10,11,12,13,14,15,8
.text
.global cdecl(PQCLEAN_MLDSA44_AVX2_f1600x4)
.global _cdecl(PQCLEAN_MLDSA44_AVX2_f1600x4)
cdecl(PQCLEAN_MLDSA44_AVX2_f1600x4):
_cdecl(PQCLEAN_MLDSA44_AVX2_f1600x4):
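#%rdi: 4-way interleaved Keccak state (25 lanes x 32 bytes), %rsi: the 24
#round constants. The body unrolls four rounds, so the outer loop runs 6
#times; rotations by 8 and 56 bits use vpshufb with the rho8/rho56 byte
#permutation tables instead of a shift/shift/or triple.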
vmovdqa rho8(%rip), %ymm0
movq $6, %rax
looptop:
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 192(%rdi), %ymm4, %ymm9
vpxor 384(%rdi), %ymm3, %ymm10
vpxor 576(%rdi), %ymm2, %ymm11
vpxor 768(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 0(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 96(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 320(%rdi), %ymm5, %ymm10
vpxor 512(%rdi), %ymm4, %ymm11
vpxor 704(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 32(%rdi), %ymm4, %ymm8
vpxor 224(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 608(%rdi), %ymm1, %ymm11
vpxor 640(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 128(%rdi), %ymm1, %ymm8
vpxor 160(%rdi), %ymm5, %ymm9
vpxor 352(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 736(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 64(%rdi), %ymm3, %ymm8
vpxor 256(%rdi), %ymm2, %ymm9
vpxor 448(%rdi), %ymm1, %ymm10
vpxor 480(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 448(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 512(%rdi), %ymm4, %ymm9
vpxor 224(%rdi), %ymm3, %ymm10
vpxor 736(%rdi), %ymm2, %ymm11
vpxor 448(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 8(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 576(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 640(%rdi), %ymm5, %ymm10
vpxor 352(%rdi), %ymm4, %ymm11
vpxor 64(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 192(%rdi), %ymm4, %ymm8
vpxor 704(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 128(%rdi), %ymm1, %ymm11
vpxor 480(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 768(%rdi), %ymm1, %ymm8
vpxor 320(%rdi), %ymm5, %ymm9
vpxor 32(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 256(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 384(%rdi), %ymm3, %ymm8
vpxor 96(%rdi), %ymm2, %ymm9
vpxor 608(%rdi), %ymm1, %ymm10
vpxor 160(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 608(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 352(%rdi), %ymm4, %ymm9
vpxor 704(%rdi), %ymm3, %ymm10
vpxor 256(%rdi), %ymm2, %ymm11
vpxor 608(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 16(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 736(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 480(%rdi), %ymm5, %ymm10
vpxor 32(%rdi), %ymm4, %ymm11
vpxor 384(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 512(%rdi), %ymm4, %ymm8
vpxor 64(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 768(%rdi), %ymm1, %ymm11
vpxor 160(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 768(%rdi)
vpxor 448(%rdi), %ymm1, %ymm8
vpxor 640(%rdi), %ymm5, %ymm9
vpxor 192(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 96(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 224(%rdi), %ymm3, %ymm8
vpxor 576(%rdi), %ymm2, %ymm9
vpxor 128(%rdi), %ymm1, %ymm10
vpxor 320(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 128(%rdi)
vmovdqa 0(%rdi), %ymm8
vmovdqa 32(%rdi), %ymm9
vmovdqa 64(%rdi), %ymm10
vmovdqa 96(%rdi), %ymm11
vmovdqa 128(%rdi), %ymm12
vpxor 160(%rdi), %ymm8, %ymm8
vpxor 192(%rdi), %ymm9, %ymm9
vpxor 224(%rdi), %ymm10, %ymm10
vpxor 256(%rdi), %ymm11, %ymm11
vpxor 288(%rdi), %ymm12, %ymm12
vpxor 320(%rdi), %ymm8, %ymm8
vpxor 352(%rdi), %ymm9, %ymm9
vpxor 384(%rdi), %ymm10, %ymm10
vpxor 416(%rdi), %ymm11, %ymm11
vpxor 448(%rdi), %ymm12, %ymm12
vpxor 480(%rdi), %ymm8, %ymm8
vpxor 512(%rdi), %ymm9, %ymm9
vpxor 544(%rdi), %ymm10, %ymm10
vpxor 576(%rdi), %ymm11, %ymm11
vpxor 608(%rdi), %ymm12, %ymm12
vpxor 640(%rdi), %ymm8, %ymm8
vpxor 672(%rdi), %ymm9, %ymm9
vpxor 704(%rdi), %ymm10, %ymm10
vpxor 736(%rdi), %ymm11, %ymm11
vpxor 768(%rdi), %ymm12, %ymm12
vpsllq $1, %ymm9, %ymm13
vpsllq $1, %ymm10, %ymm14
vpsllq $1, %ymm11, %ymm15
vpsllq $1, %ymm12, %ymm7
vpsllq $1, %ymm8, %ymm6
vpsrlq $63, %ymm9, %ymm5
vpsrlq $63, %ymm10, %ymm4
vpsrlq $63, %ymm11, %ymm3
vpsrlq $63, %ymm12, %ymm2
vpsrlq $63, %ymm8, %ymm1
vpor %ymm13, %ymm5, %ymm5
vpor %ymm14, %ymm4, %ymm4
vpor %ymm15, %ymm3, %ymm3
vpor %ymm7, %ymm2, %ymm2
vpor %ymm6, %ymm1, %ymm1
vpxor %ymm5, %ymm12, %ymm5
vpxor %ymm4, %ymm8, %ymm4
vpxor %ymm3, %ymm9, %ymm3
vpxor %ymm2, %ymm10, %ymm2
vpxor %ymm1, %ymm11, %ymm1
vpxor 0(%rdi), %ymm5, %ymm8
vpxor 32(%rdi), %ymm4, %ymm9
vpxor 64(%rdi), %ymm3, %ymm10
vpxor 96(%rdi), %ymm2, %ymm11
vpxor 128(%rdi), %ymm1, %ymm12
vpsllq $44, %ymm9, %ymm14
vpsllq $43, %ymm10, %ymm15
vpsllq $21, %ymm11, %ymm7
vpsllq $14, %ymm12, %ymm6
vpsrlq $20, %ymm9, %ymm9
vpsrlq $21, %ymm10, %ymm10
vpsrlq $43, %ymm11, %ymm11
vpsrlq $50, %ymm12, %ymm12
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vpbroadcastq 24(%rsi), %ymm8
vpxor %ymm8, %ymm13, %ymm13
vmovdqa %ymm13, 0(%rdi)
vmovdqa %ymm14, 32(%rdi)
vmovdqa %ymm15, 64(%rdi)
vmovdqa %ymm7, 96(%rdi)
vmovdqa %ymm6, 128(%rdi)
vpxor 256(%rdi), %ymm2, %ymm8
vpxor 288(%rdi), %ymm1, %ymm9
vpxor 160(%rdi), %ymm5, %ymm10
vpxor 192(%rdi), %ymm4, %ymm11
vpxor 224(%rdi), %ymm3, %ymm12
vpsllq $28, %ymm8, %ymm13
vpsllq $20, %ymm9, %ymm14
vpsllq $3, %ymm10, %ymm15
vpsllq $45, %ymm11, %ymm7
vpsllq $61, %ymm12, %ymm6
vpsrlq $36, %ymm8, %ymm8
vpsrlq $44, %ymm9, %ymm9
vpsrlq $61, %ymm10, %ymm10
vpsrlq $19, %ymm11, %ymm11
vpsrlq $3, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 160(%rdi)
vmovdqa %ymm14, 192(%rdi)
vmovdqa %ymm15, 224(%rdi)
vmovdqa %ymm7, 256(%rdi)
vmovdqa %ymm6, 288(%rdi)
vpxor 352(%rdi), %ymm4, %ymm8
vpxor 384(%rdi), %ymm3, %ymm9
vpxor 416(%rdi), %ymm2, %ymm10
vpxor 448(%rdi), %ymm1, %ymm11
vpxor 320(%rdi), %ymm5, %ymm12
vpsllq $1, %ymm8, %ymm13
vpsllq $6, %ymm9, %ymm14
vpsllq $25, %ymm10, %ymm15
#vpsllq $8, %ymm11, %ymm7
vpsllq $18, %ymm12, %ymm6
vpsrlq $63, %ymm8, %ymm8
vpsrlq $58, %ymm9, %ymm9
vpsrlq $39, %ymm10, %ymm10
#vpsrlq $56, %ymm11, %ymm11
vpsrlq $46, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
#vpor %ymm7, %ymm11, %ymm11
vpshufb %ymm0, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 320(%rdi)
vmovdqa %ymm14, 352(%rdi)
vmovdqa %ymm15, 384(%rdi)
vmovdqa %ymm7, 416(%rdi)
vmovdqa %ymm6, 448(%rdi)
vpxor 608(%rdi), %ymm1, %ymm8
vpxor 480(%rdi), %ymm5, %ymm9
vpxor 512(%rdi), %ymm4, %ymm10
vpxor 544(%rdi), %ymm3, %ymm11
vpxor 576(%rdi), %ymm2, %ymm12
vpsllq $27, %ymm8, %ymm13
vpsllq $36, %ymm9, %ymm14
vpsllq $10, %ymm10, %ymm15
vpsllq $15, %ymm11, %ymm7
#vpsllq $56, %ymm12, %ymm6
vpsrlq $37, %ymm8, %ymm8
vpsrlq $28, %ymm9, %ymm9
vpsrlq $54, %ymm10, %ymm10
vpsrlq $49, %ymm11, %ymm11
#vpsrlq $8, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
#vpor %ymm6, %ymm12, %ymm12
vpshufb rho56(%rip), %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 480(%rdi)
vmovdqa %ymm14, 512(%rdi)
vmovdqa %ymm15, 544(%rdi)
vmovdqa %ymm7, 576(%rdi)
vmovdqa %ymm6, 608(%rdi)
vpxor 704(%rdi), %ymm3, %ymm8
vpxor 736(%rdi), %ymm2, %ymm9
vpxor 768(%rdi), %ymm1, %ymm10
vpxor 640(%rdi), %ymm5, %ymm11
vpxor 672(%rdi), %ymm4, %ymm12
vpsllq $62, %ymm8, %ymm13
vpsllq $55, %ymm9, %ymm14
vpsllq $39, %ymm10, %ymm15
vpsllq $41, %ymm11, %ymm7
vpsllq $2, %ymm12, %ymm6
vpsrlq $2, %ymm8, %ymm8
vpsrlq $9, %ymm9, %ymm9
vpsrlq $25, %ymm10, %ymm10
vpsrlq $23, %ymm11, %ymm11
vpsrlq $62, %ymm12, %ymm12
vpor %ymm13, %ymm8, %ymm8
vpor %ymm14, %ymm9, %ymm9
vpor %ymm15, %ymm10, %ymm10
vpor %ymm7, %ymm11, %ymm11
vpor %ymm6, %ymm12, %ymm12
vpandn %ymm10, %ymm9, %ymm13
vpandn %ymm11, %ymm10, %ymm14
vpandn %ymm12, %ymm11, %ymm15
vpandn %ymm8, %ymm12, %ymm7
vpandn %ymm9, %ymm8, %ymm6
vpxor %ymm8, %ymm13, %ymm13
vpxor %ymm9, %ymm14, %ymm14
vpxor %ymm10, %ymm15, %ymm15
vpxor %ymm11, %ymm7, %ymm7
vpxor %ymm12, %ymm6, %ymm6
vmovdqa %ymm13, 640(%rdi)
vmovdqa %ymm14, 672(%rdi)
vmovdqa %ymm15, 704(%rdi)
vmovdqa %ymm7, 736(%rdi)
vmovdqa %ymm6, 768(%rdi)
addq $32, %rsi
subq $1, %rax
jnz looptop
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 4,490
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/avx2/ntt.S
|
#include "cdecl.h"
.include "shuffle.inc"
.macro butterfly l,h,zl0=1,zl1=1,zh0=2,zh1=2
vpmuldq %ymm\zl0,%ymm\h,%ymm13
vmovshdup %ymm\h,%ymm12
vpmuldq %ymm\zl1,%ymm12,%ymm14
vpmuldq %ymm\zh0,%ymm\h,%ymm\h
vpmuldq %ymm\zh1,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vmovshdup %ymm\h,%ymm\h
vpblendd $0xAA,%ymm12,%ymm\h,%ymm\h
vpsubd %ymm\h,%ymm\l,%ymm12
vpaddd %ymm\h,%ymm\l,%ymm\l
vmovshdup %ymm13,%ymm13
vpblendd $0xAA,%ymm14,%ymm13,%ymm13
vpaddd %ymm13,%ymm12,%ymm\h
vpsubd %ymm13,%ymm\l,%ymm\l
.endm
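/* Levels 0-1: butterflies at distances 128 and 64 coefficients; each of
 * the four invocations handles an interleaved quarter of the polynomial
 * (32 bytes out of every 128). */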
.macro levels0t1 off
/* level 0 */
vpbroadcastd (_ZETAS_QINV+1)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+1)*4(%rsi),%ymm2
vmovdqa 0+32*\off(%rdi),%ymm4
vmovdqa 128+32*\off(%rdi),%ymm5
vmovdqa 256+32*\off(%rdi),%ymm6
vmovdqa 384+32*\off(%rdi),%ymm7
vmovdqa 512+32*\off(%rdi),%ymm8
vmovdqa 640+32*\off(%rdi),%ymm9
vmovdqa 768+32*\off(%rdi),%ymm10
vmovdqa 896+32*\off(%rdi),%ymm11
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
/* level 1 */
vpbroadcastd (_ZETAS_QINV+2)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+2)*4(%rsi),%ymm2
butterfly 4,6
butterfly 5,7
vpbroadcastd (_ZETAS_QINV+3)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+3)*4(%rsi),%ymm2
butterfly 8,10
butterfly 9,11
vmovdqa %ymm4, 0+32*\off(%rdi)
vmovdqa %ymm5,128+32*\off(%rdi)
vmovdqa %ymm6,256+32*\off(%rdi)
vmovdqa %ymm7,384+32*\off(%rdi)
vmovdqa %ymm8,512+32*\off(%rdi)
vmovdqa %ymm9,640+32*\off(%rdi)
vmovdqa %ymm10,768+32*\off(%rdi)
vmovdqa %ymm11,896+32*\off(%rdi)
.endm
.macro levels2t7 off
/* level 2 */
vmovdqa 256*\off+ 0(%rdi),%ymm4
vmovdqa 256*\off+ 32(%rdi),%ymm5
vmovdqa 256*\off+ 64(%rdi),%ymm6
vmovdqa 256*\off+ 96(%rdi),%ymm7
vmovdqa 256*\off+128(%rdi),%ymm8
vmovdqa 256*\off+160(%rdi),%ymm9
vmovdqa 256*\off+192(%rdi),%ymm10
vmovdqa 256*\off+224(%rdi),%ymm11
vpbroadcastd (_ZETAS_QINV+4+\off)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+4+\off)*4(%rsi),%ymm2
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
/* level 3 */
vmovdqa (_ZETAS_QINV+8+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+8+8*\off)*4(%rsi),%ymm2
butterfly 3,5
butterfly 8,10
butterfly 4,6
butterfly 9,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
/* level 4 */
vmovdqa (_ZETAS_QINV+40+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+40+8*\off)*4(%rsi),%ymm2
butterfly 7,8
butterfly 5,6
butterfly 3,4
butterfly 10,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
/* level 5 */
vmovdqa (_ZETAS_QINV+72+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+72+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,5,1,10,2,15
butterfly 8,4,1,10,2,15
butterfly 7,3,1,10,2,15
butterfly 6,11,1,10,2,15
/* level 6 */
vmovdqa (_ZETAS_QINV+104+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+104+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,7,1,10,2,15
butterfly 8,6,1,10,2,15
vmovdqa (_ZETAS_QINV+104+8*\off+32)*4(%rsi),%ymm1
vmovdqa (_ZETAS+104+8*\off+32)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 5,3,1,10,2,15
butterfly 4,11,1,10,2,15
/* level 7 */
vmovdqa (_ZETAS_QINV+168+8*\off)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 9,8,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+32)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+32)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 7,6,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+64)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+64)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 5,4,1,10,2,15
vmovdqa (_ZETAS_QINV+168+8*\off+96)*4(%rsi),%ymm1
vmovdqa (_ZETAS+168+8*\off+96)*4(%rsi),%ymm2
vpsrlq $32,%ymm1,%ymm10
vmovshdup %ymm2,%ymm15
butterfly 3,11,1,10,2,15
vmovdqa %ymm9,256*\off+ 0(%rdi)
vmovdqa %ymm8,256*\off+ 32(%rdi)
vmovdqa %ymm7,256*\off+ 64(%rdi)
vmovdqa %ymm6,256*\off+ 96(%rdi)
vmovdqa %ymm5,256*\off+128(%rdi)
vmovdqa %ymm4,256*\off+160(%rdi)
vmovdqa %ymm3,256*\off+192(%rdi)
vmovdqa %ymm11,256*\off+224(%rdi)
.endm
.text
.global cdecl(PQCLEAN_MLDSA44_AVX2_ntt_avx)
.global _cdecl(PQCLEAN_MLDSA44_AVX2_ntt_avx)
cdecl(PQCLEAN_MLDSA44_AVX2_ntt_avx):
_cdecl(PQCLEAN_MLDSA44_AVX2_ntt_avx):
vmovdqa _8XQ*4(%rsi),%ymm0
levels0t1 0
levels0t1 1
levels0t1 2
levels0t1 3
levels2t7 0
levels2t7 1
levels2t7 2
levels2t7 3
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 5,851
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/avx2/invntt.S
|
#include "cdecl.h"
.include "shuffle.inc"
.macro butterfly l,h,zl0=1,zl1=1,zh0=2,zh1=2
vpsubd %ymm\l,%ymm\h,%ymm12
vpaddd %ymm\h,%ymm\l,%ymm\l
vpmuldq %ymm\zl0,%ymm12,%ymm13
vmovshdup %ymm12,%ymm\h
vpmuldq %ymm\zl1,%ymm\h,%ymm14
vpmuldq %ymm\zh0,%ymm12,%ymm12
vpmuldq %ymm\zh1,%ymm\h,%ymm\h
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpsubd %ymm13,%ymm12,%ymm12
vpsubd %ymm14,%ymm\h,%ymm\h
vmovshdup %ymm12,%ymm12
vpblendd $0xAA,%ymm\h,%ymm12,%ymm\h
.endm
.macro levels0t5 off
vmovdqa 256*\off+ 0(%rdi),%ymm4
vmovdqa 256*\off+ 32(%rdi),%ymm5
vmovdqa 256*\off+ 64(%rdi),%ymm6
vmovdqa 256*\off+ 96(%rdi),%ymm7
vmovdqa 256*\off+128(%rdi),%ymm8
vmovdqa 256*\off+160(%rdi),%ymm9
vmovdqa 256*\off+192(%rdi),%ymm10
vmovdqa 256*\off+224(%rdi),%ymm11
/* level 0 */
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,5,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-40)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-40)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 6,7,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-72)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-72)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 8,9,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+296-8*\off-104)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+296-8*\off-104)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 10,11,1,3,2,15
/* level 1 */
vpermq $0x1B,(_ZETAS_QINV+168-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+168-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,6,1,3,2,15
butterfly 5,7,1,3,2,15
vpermq $0x1B,(_ZETAS_QINV+168-8*\off-40)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+168-8*\off-40)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 8,10,1,3,2,15
butterfly 9,11,1,3,2,15
/* level 2 */
vpermq $0x1B,(_ZETAS_QINV+104-8*\off-8)*4(%rsi),%ymm3
vpermq $0x1B,(_ZETAS+104-8*\off-8)*4(%rsi),%ymm15
vmovshdup %ymm3,%ymm1
vmovshdup %ymm15,%ymm2
butterfly 4,8,1,3,2,15
butterfly 5,9,1,3,2,15
butterfly 6,10,1,3,2,15
butterfly 7,11,1,3,2,15
/* level 3 */
shuffle2 4,5,3,5
shuffle2 6,7,4,7
shuffle2 8,9,6,9
shuffle2 10,11,8,11
vpermq $0x1B,(_ZETAS_QINV+72-8*\off-8)*4(%rsi),%ymm1
vpermq $0x1B,(_ZETAS+72-8*\off-8)*4(%rsi),%ymm2
butterfly 3,5
butterfly 4,7
butterfly 6,9
butterfly 8,11
/* level 4 */
shuffle4 3,4,10,4
shuffle4 6,8,3,8
shuffle4 5,7,6,7
shuffle4 9,11,5,11
vpermq $0x1B,(_ZETAS_QINV+40-8*\off-8)*4(%rsi),%ymm1
vpermq $0x1B,(_ZETAS+40-8*\off-8)*4(%rsi),%ymm2
butterfly 10,4
butterfly 3,8
butterfly 6,7
butterfly 5,11
/* level 5 */
shuffle8 10,3,9,3
shuffle8 6,5,10,5
shuffle8 4,8,6,8
shuffle8 7,11,4,11
vpbroadcastd (_ZETAS_QINV+7-\off)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+7-\off)*4(%rsi),%ymm2
butterfly 9,3
butterfly 10,5
butterfly 6,8
butterfly 4,11
vmovdqa %ymm9,256*\off+ 0(%rdi)
vmovdqa %ymm10,256*\off+ 32(%rdi)
vmovdqa %ymm6,256*\off+ 64(%rdi)
vmovdqa %ymm4,256*\off+ 96(%rdi)
vmovdqa %ymm3,256*\off+128(%rdi)
vmovdqa %ymm5,256*\off+160(%rdi)
vmovdqa %ymm8,256*\off+192(%rdi)
vmovdqa %ymm11,256*\off+224(%rdi)
.endm
.macro levels6t7 off
vmovdqa 0+32*\off(%rdi),%ymm4
vmovdqa 128+32*\off(%rdi),%ymm5
vmovdqa 256+32*\off(%rdi),%ymm6
vmovdqa 384+32*\off(%rdi),%ymm7
vmovdqa 512+32*\off(%rdi),%ymm8
vmovdqa 640+32*\off(%rdi),%ymm9
vmovdqa 768+32*\off(%rdi),%ymm10
vmovdqa 896+32*\off(%rdi),%ymm11
/* level 6 */
vpbroadcastd (_ZETAS_QINV+3)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+3)*4(%rsi),%ymm2
butterfly 4,6
butterfly 5,7
vpbroadcastd (_ZETAS_QINV+2)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+2)*4(%rsi),%ymm2
butterfly 8,10
butterfly 9,11
/* level 7 */
vpbroadcastd (_ZETAS_QINV+0)*4(%rsi),%ymm1
vpbroadcastd (_ZETAS+0)*4(%rsi),%ymm2
butterfly 4,8
butterfly 5,9
butterfly 6,10
butterfly 7,11
vmovdqa %ymm8,512+32*\off(%rdi)
vmovdqa %ymm9,640+32*\off(%rdi)
vmovdqa %ymm10,768+32*\off(%rdi)
vmovdqa %ymm11,896+32*\off(%rdi)
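/* Finish the lower half: multiply by the precomputed constant at _8XDIV
 * (per its name, the 1/256 normalisation of the inverse transform folded
 * with the Montgomery factors) and reduce before the final stores. */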
vmovdqa (_8XDIV_QINV)*4(%rsi),%ymm1
vmovdqa (_8XDIV)*4(%rsi),%ymm2
vpmuldq %ymm1,%ymm4,%ymm12
vpmuldq %ymm1,%ymm5,%ymm13
vmovshdup %ymm4,%ymm8
vmovshdup %ymm5,%ymm9
vpmuldq %ymm1,%ymm8,%ymm14
vpmuldq %ymm1,%ymm9,%ymm15
vpmuldq %ymm2,%ymm4,%ymm4
vpmuldq %ymm2,%ymm5,%ymm5
vpmuldq %ymm2,%ymm8,%ymm8
vpmuldq %ymm2,%ymm9,%ymm9
vpmuldq %ymm0,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpmuldq %ymm0,%ymm15,%ymm15
vpsubd %ymm12,%ymm4,%ymm4
vpsubd %ymm13,%ymm5,%ymm5
vpsubd %ymm14,%ymm8,%ymm8
vpsubd %ymm15,%ymm9,%ymm9
vmovshdup %ymm4,%ymm4
vmovshdup %ymm5,%ymm5
vpblendd $0xAA,%ymm8,%ymm4,%ymm4
vpblendd $0xAA,%ymm9,%ymm5,%ymm5
vpmuldq %ymm1,%ymm6,%ymm12
vpmuldq %ymm1,%ymm7,%ymm13
vmovshdup %ymm6,%ymm8
vmovshdup %ymm7,%ymm9
vpmuldq %ymm1,%ymm8,%ymm14
vpmuldq %ymm1,%ymm9,%ymm15
vpmuldq %ymm2,%ymm6,%ymm6
vpmuldq %ymm2,%ymm7,%ymm7
vpmuldq %ymm2,%ymm8,%ymm8
vpmuldq %ymm2,%ymm9,%ymm9
vpmuldq %ymm0,%ymm12,%ymm12
vpmuldq %ymm0,%ymm13,%ymm13
vpmuldq %ymm0,%ymm14,%ymm14
vpmuldq %ymm0,%ymm15,%ymm15
vpsubd %ymm12,%ymm6,%ymm6
vpsubd %ymm13,%ymm7,%ymm7
vpsubd %ymm14,%ymm8,%ymm8
vpsubd %ymm15,%ymm9,%ymm9
vmovshdup %ymm6,%ymm6
vmovshdup %ymm7,%ymm7
vpblendd $0xAA,%ymm8,%ymm6,%ymm6
vpblendd $0xAA,%ymm9,%ymm7,%ymm7
vmovdqa %ymm4, 0+32*\off(%rdi)
vmovdqa %ymm5,128+32*\off(%rdi)
vmovdqa %ymm6,256+32*\off(%rdi)
vmovdqa %ymm7,384+32*\off(%rdi)
.endm
.text
.global cdecl(PQCLEAN_MLDSA44_AVX2_invntt_avx)
.global _cdecl(PQCLEAN_MLDSA44_AVX2_invntt_avx)
cdecl(PQCLEAN_MLDSA44_AVX2_invntt_avx):
_cdecl(PQCLEAN_MLDSA44_AVX2_invntt_avx):
vmovdqa _8XQ*4(%rsi),%ymm0
levels0t5 0
levels0t5 1
levels0t5 2
levels0t5 3
levels6t7 0
levels6t7 1
levels6t7 2
levels6t7 3
ret
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 19,073
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/aarch64/__asm_iNTT.S
|
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_top
.global _PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_top
PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_top:
_PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_top:
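// x0: coefficient array, x1: packed twiddle-factor table, x2: constants,
// with q at byte offset 0 and, per the invN* names below, the paired
// Montgomery factors for the final 1/N (and 1/N times twist) products
// at offsets 16-28.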
push_all
Q .req w20
Qhalf .req w21
nQhalf .req w22
invNR2ph .req w24
invNR2dp .req w25
invNWR2ph .req w26
invNWR2dp .req w27
src .req x0
counter .req x19
ldr Q, [x2, #0]
lsr Qhalf, Q, #1
neg nQhalf, Qhalf
ldr invNR2ph, [x2, #16]
ldr invNR2dp, [x2, #20]
ldr invNWR2ph, [x2, #24]
ldr invNWR2dp, [x2, #28]
ldr q20, [x1, #0*16]
ldr q21, [x1, #1*16]
ldr q22, [x1, #2*16]
ldr q23, [x1, #3*16]
ldr q24, [x1, #4*16]
ldr q25, [x1, #5*16]
ldr q26, [x1, #6*16]
ldr q27, [x1, #7*16]
mov v20.S[0], Q
ldr q0, [src, # 0*64]
ldr q1, [src, # 1*64]
ldr q2, [src, # 2*64]
ldr q3, [src, # 3*64]
ldr q4, [src, # 4*64]
ldr q5, [src, # 5*64]
ldr q6, [src, # 6*64]
ldr q7, [src, # 7*64]
qq_butterfly_botll \
v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, \
src, \
q8, q9, q10, q11, \
#8*64, #9*64, #10*64, #11*64, \
src, \
q12, q13, q14, q15, \
#12*64, #13*64, #14*64, #15*64
qq_butterfly_mix_rev v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v20, v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mix_rev v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v20, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3
qq_butterfly_mix_rev v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v20, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3
qq_butterfly_mix_rev v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v20, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1
qq_butterfly_mix_rev v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_butterfly_top v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_sub_add v16, v17, v18, v19, v28, v29, v30, v31, v0, v2, v4, v6, v8, v10, v12, v14
qq_sub_add v0, v2, v4, v6, v8, v10, v12, v14, v1, v3, v5, v7, v9, v11, v13, v15
mov v20.S[2], invNR2ph
mov v20.S[3], invNR2dp
qq_montgomery_mul v1, v3, v5, v7, v0, v2, v4, v6, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v0, v2, v4, v6, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov v20.S[2], invNWR2ph
mov v20.S[3], invNWR2dp
qq_montgomery_mul v9, v11, v13, v15, v8, v10, v12, v14, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v8, v10, v12, v14, v28, v29, v30, v31, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov counter, #3
_intt_top_loop:
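// Conditional reduction, interleaved with the stores and reloads below:
// the cmge masks are -1/0, and their difference selects -q, 0 or +q for
// the mla, so each lane has q subtracted when >= q/2 and added when
// <= -q/2, keeping coefficients centred around zero.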
dup v29.4S, Q
dup v30.4S, Qhalf
dup v31.4S, nQhalf
cmge v18.4S, v31.4S, v0.4S
cmge v19.4S, v31.4S, v1.4S
cmge v16.4S, v0.4S, v30.4S
cmge v17.4S, v1.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v0.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v2.4S
mla v1.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v3.4S
str q0, [src, #0*64]
cmge v16.4S, v2.4S, v30.4S
ldr q0, [src, #(16 + 0*64)]
str q1, [src, #1*64]
cmge v17.4S, v3.4S, v30.4S
ldr q1, [src, #(16 + 1*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v2.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v4.4S
mla v3.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v5.4S
str q2, [src, #2*64]
cmge v16.4S, v4.4S, v30.4S
ldr q2, [src, #(16 + 2*64)]
str q3, [src, #3*64]
cmge v17.4S, v5.4S, v30.4S
ldr q3, [src, #(16 + 3*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v4.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v6.4S
mla v5.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v7.4S
str q4, [src, #4*64]
cmge v16.4S, v6.4S, v30.4S
ldr q4, [src, #(16 + 4*64)]
str q5, [src, #5*64]
cmge v17.4S, v7.4S, v30.4S
ldr q5, [src, #(16 + 5*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v6.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v8.4S
mla v7.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v9.4S
str q6, [src, #6*64]
cmge v16.4S, v8.4S, v30.4S
ldr q6, [src, #(16 + 6*64)]
str q7, [src, #7*64]
cmge v17.4S, v9.4S, v30.4S
ldr q7, [src, #(16 + 7*64)]
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v8.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v10.4S
mla v9.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v11.4S
str q8, [src, #8*64]
cmge v16.4S, v10.4S, v30.4S
str q9, [src, #9*64]
cmge v17.4S, v11.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v10.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v12.4S
mla v11.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v13.4S
str q10, [src, #10*64]
cmge v16.4S, v12.4S, v30.4S
str q11, [src, #11*64]
cmge v17.4S, v13.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v12.4S, v16.4S, v29.4S
cmge v18.4S, v31.4S, v14.4S
mla v13.4S, v17.4S, v29.4S
cmge v19.4S, v31.4S, v15.4S
str q12, [src, #12*64]
cmge v16.4S, v14.4S, v30.4S
str q13, [src, #13*64]
cmge v17.4S, v15.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v14.4S, v16.4S, v29.4S
mla v15.4S, v17.4S, v29.4S
str q14, [src, #14*64]
str q15, [src, #15*64]
add src, src, #16
qq_butterfly_botll \
v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, \
src, \
q8, q9, q10, q11, \
#8*64, #9*64, #10*64, #11*64, \
src, \
q12, q13, q14, q15, \
#12*64, #13*64, #14*64, #15*64
qq_butterfly_mix_rev v0, v2, v4, v6, v16, v17, v18, v19, v1, v3, v5, v7, v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v20, v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mix_rev v8, v10, v12, v14, v28, v29, v30, v31, v9, v11, v13, v15, v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v20, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3
qq_butterfly_mix_rev v0, v1, v4, v5, v16, v17, v18, v19, v2, v3, v6, v7, v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v20, v22, 0, 1, v22, 0, 1, v22, 2, 3, v22, 2, 3, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3
qq_butterfly_mix_rev v8, v9, v12, v13, v28, v29, v30, v31, v10, v11, v14, v15, v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v20, v23, 0, 1, v23, 0, 1, v23, 2, 3, v23, 2, 3, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1
qq_butterfly_mix_rev v0, v1, v2, v3, v16, v17, v18, v19, v4, v5, v6, v7, v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_butterfly_top v8, v9, v10, v11, v28, v29, v30, v31, v12, v13, v14, v15, v20, v21, 2, 3, v21, 2, 3, v21, 2, 3, v21, 2, 3
qq_sub_add v16, v17, v18, v19, v28, v29, v30, v31, v0, v2, v4, v6, v8, v10, v12, v14
qq_sub_add v0, v2, v4, v6, v8, v10, v12, v14, v1, v3, v5, v7, v9, v11, v13, v15
mov v20.S[2], invNR2ph
mov v20.S[3], invNR2dp
qq_montgomery_mul v1, v3, v5, v7, v0, v2, v4, v6, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v0, v2, v4, v6, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
mov v20.S[2], invNWR2ph
mov v20.S[3], invNWR2dp
qq_montgomery_mul v9, v11, v13, v15, v8, v10, v12, v14, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
qq_montgomery_mul v8, v10, v12, v14, v28, v29, v30, v31, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3
sub counter, counter, #1
cbnz counter, _intt_top_loop
dup v29.4S, Q
dup v30.4S, Qhalf
dup v31.4S, nQhalf
cmge v18.4S, v31.4S, v0.4S
cmge v19.4S, v31.4S, v1.4S
cmge v16.4S, v0.4S, v30.4S
cmge v17.4S, v1.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v0.4S, v16.4S, v29.4S
mla v1.4S, v17.4S, v29.4S
str q0, [src, #0*64]
str q1, [src, #1*64]
cmge v18.4S, v31.4S, v2.4S
cmge v19.4S, v31.4S, v3.4S
cmge v16.4S, v2.4S, v30.4S
cmge v17.4S, v3.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v2.4S, v16.4S, v29.4S
mla v3.4S, v17.4S, v29.4S
str q2, [src, #2*64]
str q3, [src, #3*64]
cmge v18.4S, v31.4S, v4.4S
cmge v19.4S, v31.4S, v5.4S
cmge v16.4S, v4.4S, v30.4S
cmge v17.4S, v5.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v4.4S, v16.4S, v29.4S
mla v5.4S, v17.4S, v29.4S
str q4, [src, #4*64]
str q5, [src, #5*64]
cmge v18.4S, v31.4S, v6.4S
cmge v19.4S, v31.4S, v7.4S
cmge v16.4S, v6.4S, v30.4S
cmge v17.4S, v7.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v6.4S, v16.4S, v29.4S
mla v7.4S, v17.4S, v29.4S
str q6, [src, #6*64]
str q7, [src, #7*64]
cmge v18.4S, v31.4S, v8.4S
cmge v19.4S, v31.4S, v9.4S
cmge v16.4S, v8.4S, v30.4S
cmge v17.4S, v9.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v8.4S, v16.4S, v29.4S
mla v9.4S, v17.4S, v29.4S
str q8, [src, #8*64]
str q9, [src, #9*64]
cmge v18.4S, v31.4S, v10.4S
cmge v19.4S, v31.4S, v11.4S
cmge v16.4S, v10.4S, v30.4S
cmge v17.4S, v11.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v10.4S, v16.4S, v29.4S
mla v11.4S, v17.4S, v29.4S
str q10, [src, #10*64]
str q11, [src, #11*64]
cmge v18.4S, v31.4S, v12.4S
cmge v19.4S, v31.4S, v13.4S
cmge v16.4S, v12.4S, v30.4S
cmge v17.4S, v13.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v12.4S, v16.4S, v29.4S
mla v13.4S, v17.4S, v29.4S
str q12, [src, #12*64]
str q13, [src, #13*64]
cmge v18.4S, v31.4S, v14.4S
cmge v19.4S, v31.4S, v15.4S
cmge v16.4S, v14.4S, v30.4S
cmge v17.4S, v15.4S, v30.4S
sub v16.4S, v16.4S, v18.4S
sub v17.4S, v17.4S, v19.4S
mla v14.4S, v16.4S, v29.4S
mla v15.4S, v17.4S, v29.4S
str q14, [src, #14*64]
str q15, [src, #15*64]
add src, src, #16
.unreq Q
.unreq Qhalf
.unreq nQhalf
.unreq invNR2ph
.unreq invNR2dp
.unreq invNWR2ph
.unreq invNWR2dp
.unreq src
.unreq counter
pop_all
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_bot
.global _PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_bot
PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_bot:
_PQCLEAN_MLDSA44_AARCH64__asm_intt_SIMD_bot:
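// The bottom layers run on two 512-byte halves of the coefficient array
// in lockstep (src1 = src0 + 512), with separate twiddle tables at
// x1 + 128 (table0) and table0 + 1024 (table1).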
push_all
Q .req w20
RphRdp .req x21
src0 .req x0
src1 .req x2
table0 .req x28
table1 .req x27
counter .req x19
ldr Q, [x2]
ldr RphRdp, [x2, #8]
add table0, x1, #128
add table1, table0, #1024
add src1, src0, #512
ldr q8, [table0, #4*16]
ldr q9, [table0, #5*16]
ldr q10, [table0, #6*16]
ldr q11, [table0, #7*16]
ldr q24, [table1, #4*16]
ldr q25, [table1, #5*16]
ldr q26, [table1, #6*16]
ldr q27, [table1, #7*16]
ldr q0, [src0, # 0*16]
ldr q1, [src0, # 1*16]
ldr q16, [src1, # 0*16]
ldr q17, [src1, # 1*16]
ldr q2, [src0, # 2*16]
ldr q3, [src0, # 3*16]
ldr q18, [src1, # 2*16]
ldr q19, [src1, # 3*16]
trn_4x4_l4 \
v0, v1, v2, v3, v12, v13, v14, v15, \
table0, \
q4, q5, q6, q7, \
#0*16, #1*16, #2*16, #3*16
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q20, q21, q22, q23, \
#0*16, #1*16, #2*16, #3*16
mov v4.S[0], Q
mov v20.D[0], RphRdp
dq_butterfly_vec_bot v0, v2, v12, v13, v1, v3, v4, v8, v9, v10, v11
dq_butterfly_vec_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v8, v9, v10, v11, v24, v25, v26, v27
dq_butterfly_vec_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v24, v25, v26, v27, v6, v7, v6, v7
dq_butterfly_vec_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v6, v7, v6, v7, v22, v23, v22, v23
mov counter, #7
_intt_bot_loop:
dq_butterfly_vec_top_ltrn_4x4 \
v28, v29, v18, v19, v4, v22, v23, v22, v23, \
table0, \
q8, q9, q10, q11, \
#(128+4*16), #(128+5*16), #(128+6*16), #(128+7*16), \
v0, v1, v2, v3, v12, v13, v14, v15
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q24, q25, q26, q27, \
#(128+4*16), #(128+5*16), #(128+6*16), #(128+7*16)
dq_butterfly_bot v0, v2, v12, v13, v1, v3, v4, v5, 0, 1, v5, 2, 3
dq_butterfly_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3
dq_butterfly_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v21, 0, 1, v21, 2, 3, v4, 2, 3, v4, 2, 3
dq_butterfly_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_top v16, v17, v28, v29, v18, v19, v4, v20, 2, 3, v20, 2, 3
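/* rounding reduction mod Q (held in v4.S[0]), interleaved with the stores and loads: t = (a + (1 << 22)) >> 23 via srshr, then a -= t*Q via mls */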
str q2, [src0, # 2*16]
srshr v14.4S, v0.4S, #23
ldr q2, [src0, #(64+ 2*16)]
str q3, [src0, # 3*16]
srshr v15.4S, v1.4S, #23
ldr q3, [src0, #(64+ 3*16)]
str q18, [src1, # 2*16]
srshr v30.4S, v16.4S, #23
ldr q18, [src1, #(64+ 2*16)]
str q19, [src1, # 3*16]
srshr v31.4S, v17.4S, #23
ldr q19, [src1, #(64+ 3*16)]
mls v0.4S, v14.4S, v4.S[0]
str q0, [src0, # 0*16]
ldr q0, [src0, #(64+ 0*16)]
mls v1.4S, v15.4S, v4.S[0]
str q1, [src0, # 1*16]
ldr q1, [src0, #(64+ 1*16)]
mls v16.4S, v30.4S, v4.S[0]
str q16, [src1, # 0*16]
ldr q16, [src1, #(64+ 0*16)]
mls v17.4S, v31.4S, v4.S[0]
str q17, [src1, # 1*16]
ldr q17, [src1, #(64+ 1*16)]
add table0, table0, #128
add table1, table1, #128
add src0, src0, #64
add src1, src1, #64
trn_4x4_l4 \
v0, v1, v2, v3, v12, v13, v14, v15, \
table0, \
q4, q5, q6, q7, \
#0*16, #1*16, #2*16, #3*16
trn_4x4_l4 \
v16, v17, v18, v19, v28, v29, v30, v31, \
table1, \
q20, q21, q22, q23, \
#0*16, #1*16, #2*16, #3*16
mov v4.S[0], Q
mov v20.D[0], RphRdp
dq_butterfly_vec_bot v0, v2, v12, v13, v1, v3, v4, v8, v9, v10, v11
dq_butterfly_vec_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v8, v9, v10, v11, v24, v25, v26, v27
dq_butterfly_vec_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v24, v25, v26, v27, v6, v7, v6, v7
dq_butterfly_vec_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v6, v7, v6, v7, v22, v23, v22, v23
sub counter, counter, #1
cbnz counter, _intt_bot_loop
dq_butterfly_vec_top_trn_4x4 \
v16, v17, v28, v29, v18, v19, v4, v22, v23, v22, v23, \
v0, v1, v2, v3, v12, v13, v14, v15
trn_4x4 v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_bot v0, v2, v12, v13, v1, v3, v4, v5, 0, 1, v5, 2, 3
dq_butterfly_mix_rev v0, v2, v12, v13, v1, v3, v16, v18, v28, v29, v17, v19, v4, v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3
dq_butterfly_mix_rev v16, v18, v28, v29, v17, v19, v0, v1, v12, v13, v2, v3, v4, v21, 0, 1, v21, 2, 3, v4, 2, 3, v4, 2, 3
dq_butterfly_mix_rev v0, v1, v12, v13, v2, v3, v16, v17, v28, v29, v18, v19, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_top v16, v17, v28, v29, v18, v19, v4, v20, 2, 3, v20, 2, 3
str q2, [src0, # 2*16]
str q3, [src0, # 3*16]
str q18, [src1, # 2*16]
str q19, [src1, # 3*16]
srshr v14.4S, v0.4S, #23
srshr v15.4S, v1.4S, #23
srshr v30.4S, v16.4S, #23
srshr v31.4S, v17.4S, #23
mls v0.4S, v14.4S, v4.S[0]
mls v1.4S, v15.4S, v4.S[0]
mls v16.4S, v30.4S, v4.S[0]
mls v17.4S, v31.4S, v4.S[0]
str q0, [src0, # 0*16]
str q1, [src0, # 1*16]
str q16, [src1, # 0*16]
str q17, [src1, # 1*16]
add table0, table0, #128
add table1, table1, #128
add src0, src0, #64
add src1, src1, #64
.unreq Q
.unreq RphRdp
.unreq src0
.unreq src1
.unreq table0
.unreq table1
.unreq counter
pop_all
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/aarch64/__asm_NTT.S
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
#include "params.h"
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_top
.global _PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_top
PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_top:
_PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_top:
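/* x0: coefficient array; x1: twiddle factors for the first NTT levels; Q is loaded from [x2] */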
push_simd
Q .req w8
src .req x0
counter .req x11
ld1 {v20.4S, v21.4S, v22.4S, v23.4S}, [x1], #64
ld1 {v24.4S, v25.4S, v26.4S, v27.4S}, [x1], #64
ldr Q, [x2]
mov v20.S[0], Q
ldr q9, [src, #9*64]
ldr q11, [src, #11*64]
ldr q13, [src, #13*64]
ldr q15, [src, #15*64]
qq_butterfly_topl \
v9, v11, v13, v15, v16, v17, v18, v19, v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64
qq_butterfly_mixll \
v1, v3, v5, v7, v9, v11, v13, v15, v16, v17, v18, v19, \
v8, v10, v12, v14, v28, v29, v30, v31, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64
qq_butterfly_mix v0, v2, v4, v6, v8, v10, v12, v14, v28, v29, v30, v31, v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mixssl \
v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, \
v1, v3, v5, v7, v28, v29, v30, v31, \
v20, \
v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, \
src, \
q9, q11, q13, q15, \
#9*64, #11*64, #13*64, #15*64, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q9, q11, q13, q15, \
#(16+9*64), #(16+11*64), #(16+13*64), #(16+15*64)
mov counter, #3
_ntt_top_loop:
qq_butterfly_mixssl \
v0, v2, v4, v6, v1, v3, v5, v7, v28, v29, v30, v31, \
v9, v11, v13, v15, v16, v17, v18, v19, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64, \
src, \
q1, q3, q5, q7, \
#(16+1*64), #(16+3*64), #(16+5*64), #(16+7*64)
qq_butterfly_mixll \
v1, v3, v5, v7, v9, v11, v13, v15, v16, v17, v18, v19, \
v8, v10, v12, v14, v28, v29, v30, v31, \
v20, \
v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, \
src, \
q8, q10, q12, q14, \
#(16+8*64), #(16+10*64), #(16+12*64), #(16+14*64), \
src, \
q0, q2, q4, q6, \
#(16+0*64), #(16+2*64), #(16+4*64), #(16+6*64)
add src, src, #16
qq_butterfly_mix v0, v2, v4, v6, v8, v10, v12, v14, v28, v29, v30, v31, v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v20, v20, 2, 3, v20, 2, 3, v20, 2, 3, v20, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v1, v3, v9, v11, v5, v7, v13, v15, v16, v17, v18, v19, v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3
qq_butterfly_mix v0, v2, v8, v10, v4, v6, v12, v14, v28, v29, v30, v31, v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v20, v21, 0, 1, v21, 0, 1, v21, 2, 3, v21, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v1, v5, v9, v13, v3, v7, v11, v15, v16, v17, v18, v19, v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3
qq_butterfly_mix v0, v4, v8, v12, v2, v6, v10, v14, v28, v29, v30, v31, v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, v20, v22, 0, 1, v22, 2, 3, v23, 0, 1, v23, 2, 3, v26, 0, 1, v26, 2, 3, v27, 0, 1, v27, 2, 3
qq_butterfly_mixssl \
v8, v10, v12, v14, v9, v11, v13, v15, v16, v17, v18, v19, \
v1, v3, v5, v7, v28, v29, v30, v31, \
v20, \
v24, 0, 1, v24, 2, 3, v25, 0, 1, v25, 2, 3, \
src, \
q9, q11, q13, q15, \
#9*64, #11*64, #13*64, #15*64, \
src, \
q8, q10, q12, q14, \
#8*64, #10*64, #12*64, #14*64, \
src, \
q9, q11, q13, q15, \
#(16+9*64), #(16+11*64), #(16+13*64), #(16+15*64)
sub counter, counter, #1
cbnz counter, _ntt_top_loop
qq_butterfly_botss \
v0, v2, v4, v6, v1, v3, v5, v7, v28, v29, v30, v31, \
src, \
q1, q3, q5, q7, \
#1*64, #3*64, #5*64, #7*64, \
src, \
q0, q2, q4, q6, \
#0*64, #2*64, #4*64, #6*64
.unreq Q
.unreq src
.unreq counter
pop_simd
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_bot
.global _PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_bot
PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_bot:
_PQCLEAN_MLDSA44_AARCH64__asm_ntt_SIMD_bot:
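/* x0: coefficient array; x1: twiddle factors for the remaining NTT levels (tables at x1 + 128 and x1 + 128 + 1024); Q is loaded from [x2] */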
push_simd
Q .req w8
src .req x0
table0 .req x9
table1 .req x10
counter .req x11
ldr Q, [x2]
add table0, x1, #128
add table1, table0, #1024
ldr q0, [src, #0*16]
ldr q1, [src, #1*16]
ldr q2, [src, #2*16]
ldr q3, [src, #3*16]
ldr q4, [table0, #0*16]
ldr q5, [table0, #1*16]
ldr q20, [table1, #0*16]
ldr q21, [table1, #1*16]
dq_butterfly_topl4 \
v0, v1, v2, v3, v12, v13, v4, v4, 2, 3, v4, 2, 3, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
dq_butterfly_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v16, v18, v17, v19, v28, v29, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v0, v1, v2, v3, v12, v13, v14, v15
dq_butterfly_vec_top_trn_4x4 \
v0, v1, v2, v3, v12, v13, v4, v6, v7, v6, v7, \
v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_vec_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, v4, v8, v9, v10, v11, v24, v25, v26, v27
trn_4x4_l4 v0, v1, v2, v3, v8, v9, v10, v11, src, q12, q13, q14, q15, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q0, [src, #0*16]
str q2, [src, #2*16]
dq_butterfly_vec_bot v16, v18, v17, v19, v28, v29, v4, v24, v25, v26, v27
str q1, [src, #1*16]
str q3, [src, #3*16]
add src, src, #64
trn_4x4_l4 v16, v17, v18, v19, v24, v25, v26, v27, src, q28, q29, q30, q31, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v12, v13, v14, v15, v0, v1, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v28, v30, v29, v31, v16, v17, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
dq_butterfly_vec_top_trn_4x4 \
v12, v13, v14, v15, v0, v1, v4, v6, v7, v6, v7, \
v28, v29, v30, v31, v16, v17, v18, v19
dq_butterfly_vec_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, v4, v8, v9, v10, v11, v24, v25, v26, v27
mov counter, #3
_ntt_bot_loop:
trn_4x4_l4 v12, v13, v14, v15, v8, v9, v10, v11, src, q0, q1, q2, q3, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q12, [src, #0*16]
str q13, [src, #1*16]
dq_butterfly_vec_bot v28, v30, v29, v31, v16, v17, v4, v24, v25, v26, v27
str q14, [src, #2*16]
str q15, [src, #3*16]
add src, src, #64
trn_4x4_l4 v28, v29, v30, v31, v24, v25, v26, v27, src, q16, q17, q18, q19, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v0, v1, v2, v3, v12, v13, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q28, q29, q30, q31, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v16, v18, v17, v19, v28, v29, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v0, v1, v2, v3, v12, v13, v14, v15
dq_butterfly_vec_top_trn_4x4 \
v0, v1, v2, v3, v12, v13, v4, v6, v7, v6, v7, \
v16, v17, v18, v19, v28, v29, v30, v31
dq_butterfly_vec_mix v0, v1, v2, v3, v12, v13, v16, v17, v18, v19, v28, v29, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v16, v17, v18, v19, v28, v29, v0, v2, v1, v3, v12, v13, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v0, v2, v1, v3, v12, v13, v16, v18, v17, v19, v28, v29, v4, v8, v9, v10, v11, v24, v25, v26, v27
trn_4x4_l4 v0, v1, v2, v3, v8, v9, v10, v11, src, q12, q13, q14, q15, #(64+0*16), #(64+1*16), #(64+2*16), #(64+3*16)
str q0, [src, #0*16]
str q2, [src, #2*16]
dq_butterfly_vec_bot v16, v18, v17, v19, v28, v29, v4, v24, v25, v26, v27
str q1, [src, #1*16]
str q3, [src, #3*16]
add src, src, #64
trn_4x4_l4 v16, v17, v18, v19, v24, v25, v26, v27, src, q28, q29, q30, q31, #(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
sub src, src, #64
dq_butterfly_top2l4s4 \
v12, v13, v14, v15, v0, v1, v4, v4, 2, 3, v4, 2, 3, \
table0, q4, q5, #0*16, #1*16, \
table1, q20, q21, #0*16, #1*16, \
src, \
q16, q17, q18, q19, \
#(512+0*16), #(512+1*16), #(512+2*16), #(512+3*16)
add src, src, #64
dq_butterfly_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v4, 2, 3, v4, 2, 3, v20, 2, 3, v20, 2, 3
dq_butterfly_mixl6 \
v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, \
v4, \
v20, 2, 3, v20, 2, 3, v5, 0, 1, v5, 2, 3, \
table0, \
q6, q7, q8, q9, q10, q11, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_mixl6 \
v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, \
v4, \
v5, 0, 1, v5, 2, 3, v21, 0, 1, v21, 2, 3, \
table1, \
q22, q23, q24, q25, q26, q27, \
#2*16, #3*16, #4*16, #5*16, #6*16, #7*16
dq_butterfly_bot v28, v30, v29, v31, v16, v17, v4, v21, 0, 1, v21, 2, 3
add table0, table0, #128
add table1, table1, #128
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
dq_butterfly_vec_top_trn_4x4 \
v12, v13, v14, v15, v0, v1, v4, v6, v7, v6, v7, \
v28, v29, v30, v31, v16, v17, v18, v19
dq_butterfly_vec_mix v12, v13, v14, v15, v0, v1, v28, v29, v30, v31, v16, v17, v4, v6, v7, v6, v7, v22, v23, v22, v23
dq_butterfly_vec_mix v28, v29, v30, v31, v16, v17, v12, v14, v13, v15, v0, v1, v4, v22, v23, v22, v23, v8, v9, v10, v11
dq_butterfly_vec_mix v12, v14, v13, v15, v0, v1, v28, v30, v29, v31, v16, v17, v4, v8, v9, v10, v11, v24, v25, v26, v27
sub counter, counter, #1
cbnz counter, _ntt_bot_loop
dq_butterfly_vec_bot v28, v30, v29, v31, v16, v17, v4, v24, v25, v26, v27
trn_4x4 v12, v13, v14, v15, v0, v1, v2, v3
trn_4x4_s4 v28, v29, v30, v31, v16, v17, v18, v19, src, q12, q13, q14, q15, #0*16, #1*16, #2*16, #3*16
str q28, [src, #(512+0*16)]
str q29, [src, #(512+1*16)]
str q30, [src, #(512+2*16)]
str q31, [src, #(512+3*16)]
add src, src, #64
.unreq Q
.unreq src
.unreq table0
.unreq table1
.unreq counter
pop_simd
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/ml-dsa-44/aarch64/__asm_poly.S
/*
* We offer
* CC0 1.0 Universal or the following MIT License for this file.
* You may freely choose one of them that applies.
*
* MIT License
*
* Copyright (c) 2023: Hanno Becker, Vincent Hwang, Matthias J. Kannwischer, Bo-Yin Yang, and Shang-Yi Yang
* Copyright (c) 2023: Vincent Hwang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "macros.inc"
#include "params.h"
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_10_to_32
.global _PQCLEAN_MLDSA44_AARCH64__asm_10_to_32
PQCLEAN_MLDSA44_AARCH64__asm_10_to_32:
_PQCLEAN_MLDSA44_AARCH64__asm_10_to_32:
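/* unpack 10-bit fields into 32-bit words: each of the 16 iterations reads five 32-bit words (160 bits) from x1 and writes 16 coefficients to x0, stitching fields that straddle word boundaries with lsr/lsl/orr */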
mov x7, #16
_10_to_32_loop:
ldr w2, [x1], #4
ubfx w3, w2, #0, #10
str w3, [x0], #4
ubfx w4, w2, #10, #10
str w4, [x0], #4
ubfx w5, w2, #20, #10
str w5, [x0], #4
lsr w6, w2, #30
ldr w2, [x1], #4
ubfx w3, w2, #0, #8
lsl w3, w3, #2
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #8, #10
str w4, [x0], #4
ubfx w5, w2, #18, #10
str w5, [x0], #4
lsr w6, w2, #28
ldr w2, [x1], #4
ubfx w3, w2, #0, #6
lsl w3, w3, #4
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #6, #10
str w4, [x0], #4
ubfx w5, w2, #16, #10
str w5, [x0], #4
lsr w6, w2, #26
ldr w2, [x1], #4
ubfx w3, w2, #0, #4
lsl w3, w3, #6
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #4, #10
str w4, [x0], #4
ubfx w5, w2, #14, #10
str w5, [x0], #4
lsr w6, w2, #24
ldr w2, [x1], #4
ubfx w3, w2, #0, #2
lsl w3, w3, #8
orr w3, w3, w6
str w3, [x0], #4
ubfx w4, w2, #2, #10
str w4, [x0], #4
ubfx w5, w2, #12, #10
str w5, [x0], #4
ubfx w6, w2, #22, #10
str w6, [x0], #4
sub x7, x7, #1
cbnz x7, _10_to_32_loop
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_reduce
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_reduce
PQCLEAN_MLDSA44_AARCH64__asm_poly_reduce:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_reduce:
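/* reduce every coefficient of the polynomial at x0 modulo Q (loaded from [x1]): srshr #23 computes t = (a + (1 << 22)) >> 23, then mls computes a - t*Q */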
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #7
_poly_reduce_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_reduce_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_caddq
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_caddq
PQCLEAN_MLDSA44_AARCH64__asm_poly_caddq:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_caddq:
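/* conditionally add Q (loaded from [x1]) to negative coefficients: sshr #31 yields an all-ones mask exactly for a < 0, and mls subtracts mask*Q, i.e. adds Q */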
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
sshr v16.4S, v0.4S, #31
ld1 { v5.4S}, [x1], #16
sshr v17.4S, v1.4S, #31
ld1 { v6.4S}, [x1], #16
sshr v18.4S, v2.4S, #31
ld1 { v7.4S}, [x1], #16
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #7
_poly_caddq_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
sshr v16.4S, v0.4S, #31
ld1 { v5.4S}, [x1], #16
sshr v17.4S, v1.4S, #31
ld1 { v6.4S}, [x1], #16
sshr v18.4S, v2.4S, #31
ld1 { v7.4S}, [x1], #16
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_caddq_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_freeze
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_freeze
PQCLEAN_MLDSA44_AARCH64__asm_poly_freeze:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_freeze:
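/* map each coefficient to its canonical representative in [0, Q): first the rounding reduction (srshr #23 / mls), then the conditional add of Q (sshr #31 / mls) */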
ldr w4, [x1]
dup v24.4S, w4
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
sshr v16.4S, v0.4S, #31
mls v5.4S, v21.4S, v24.4S
sshr v17.4S, v1.4S, #31
mls v6.4S, v22.4S, v24.4S
sshr v18.4S, v2.4S, #31
mls v7.4S, v23.4S, v24.4S
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
mov x16, #8
_poly_freeze_loop:
st1 { v4.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 { v5.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 { v6.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 { v7.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x1], #16
srshr v16.4S, v0.4S, #23
ld1 { v5.4S}, [x1], #16
srshr v17.4S, v1.4S, #23
ld1 { v6.4S}, [x1], #16
srshr v18.4S, v2.4S, #23
ld1 { v7.4S}, [x1], #16
srshr v19.4S, v3.4S, #23
srshr v20.4S, v4.4S, #23
mls v0.4S, v16.4S, v24.4S
srshr v21.4S, v5.4S, #23
mls v1.4S, v17.4S, v24.4S
srshr v22.4S, v6.4S, #23
mls v2.4S, v18.4S, v24.4S
srshr v23.4S, v7.4S, #23
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
sshr v16.4S, v0.4S, #31
mls v5.4S, v21.4S, v24.4S
sshr v17.4S, v1.4S, #31
mls v6.4S, v22.4S, v24.4S
sshr v18.4S, v2.4S, #31
mls v7.4S, v23.4S, v24.4S
sshr v19.4S, v3.4S, #31
sshr v20.4S, v4.4S, #31
mls v0.4S, v16.4S, v24.4S
sshr v21.4S, v5.4S, #31
mls v1.4S, v17.4S, v24.4S
sshr v22.4S, v6.4S, #31
mls v2.4S, v18.4S, v24.4S
sshr v23.4S, v7.4S, #31
mls v3.4S, v19.4S, v24.4S
mls v4.4S, v20.4S, v24.4S
st1 { v0.4S}, [x0], #16
mls v5.4S, v21.4S, v24.4S
st1 { v1.4S}, [x0], #16
mls v6.4S, v22.4S, v24.4S
st1 { v2.4S}, [x0], #16
mls v7.4S, v23.4S, v24.4S
st1 { v3.4S}, [x0], #16
sub x16, x16, #1
cbnz x16, _poly_freeze_loop
st1 { v4.4S}, [x0], #16
st1 { v5.4S}, [x0], #16
st1 { v6.4S}, [x0], #16
st1 { v7.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_power2round
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_power2round
PQCLEAN_MLDSA44_AARCH64__asm_poly_power2round:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_power2round:
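/* power2round with D = 13: reads a from x2, writes a1 = (a - 1 + (1 << 12)) >> 13 (sub + srshr #13) to x0 and a0 = a - (a1 << 13) to x1 */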
mov w4, #1
dup v28.4S, w4
ld1 { v0.4S}, [x2], #16
ld1 { v1.4S}, [x2], #16
ld1 { v2.4S}, [x2], #16
ld1 { v3.4S}, [x2], #16
ld1 {v20.4S}, [x2], #16
sub v4.4S, v0.4S, v28.4S
ld1 {v21.4S}, [x2], #16
sub v5.4S, v1.4S, v28.4S
ld1 {v22.4S}, [x2], #16
sub v6.4S, v2.4S, v28.4S
ld1 {v23.4S}, [x2], #16
sub v7.4S, v3.4S, v28.4S
sub v24.4S, v20.4S, v28.4S
srshr v16.4S, v4.4S, #13
sub v25.4S, v21.4S, v28.4S
srshr v17.4S, v5.4S, #13
sub v26.4S, v22.4S, v28.4S
srshr v18.4S, v6.4S, #13
sub v27.4S, v23.4S, v28.4S
srshr v19.4S, v7.4S, #13
srshr v28.4S, v24.4S, #13
st1 {v16.4S}, [x0], #16
srshr v29.4S, v25.4S, #13
st1 {v17.4S}, [x0], #16
srshr v30.4S, v26.4S, #13
st1 {v18.4S}, [x0], #16
srshr v31.4S, v27.4S, #13
st1 {v19.4S}, [x0], #16
st1 {v28.4S}, [x0], #16
shl v4.4S, v16.4S, #13
st1 {v29.4S}, [x0], #16
shl v5.4S, v17.4S, #13
st1 {v30.4S}, [x0], #16
shl v6.4S, v18.4S, #13
st1 {v31.4S}, [x0], #16
shl v7.4S, v19.4S, #13
shl v24.4S, v28.4S, #13
sub v16.4S, v0.4S, v4.4S
shl v25.4S, v29.4S, #13
sub v17.4S, v1.4S, v5.4S
shl v26.4S, v30.4S, #13
sub v18.4S, v2.4S, v6.4S
shl v27.4S, v31.4S, #13
sub v19.4S, v3.4S, v7.4S
sub v28.4S, v20.4S, v24.4S
st1 {v16.4S}, [x1], #16
sub v29.4S, v21.4S, v25.4S
st1 {v17.4S}, [x1], #16
sub v30.4S, v22.4S, v26.4S
st1 {v18.4S}, [x1], #16
sub v31.4S, v23.4S, v27.4S
st1 {v19.4S}, [x1], #16
mov x16, #7
_poly_power2round_loop:
st1 {v28.4S}, [x1], #16
dup v28.4S, w4
ld1 { v0.4S}, [x2], #16
st1 {v29.4S}, [x1], #16
ld1 { v1.4S}, [x2], #16
st1 {v30.4S}, [x1], #16
ld1 { v2.4S}, [x2], #16
st1 {v31.4S}, [x1], #16
ld1 { v3.4S}, [x2], #16
ld1 {v20.4S}, [x2], #16
sub v4.4S, v0.4S, v28.4S
ld1 {v21.4S}, [x2], #16
sub v5.4S, v1.4S, v28.4S
ld1 {v22.4S}, [x2], #16
sub v6.4S, v2.4S, v28.4S
ld1 {v23.4S}, [x2], #16
sub v7.4S, v3.4S, v28.4S
sub v24.4S, v20.4S, v28.4S
srshr v16.4S, v4.4S, #13
sub v25.4S, v21.4S, v28.4S
srshr v17.4S, v5.4S, #13
sub v26.4S, v22.4S, v28.4S
srshr v18.4S, v6.4S, #13
sub v27.4S, v23.4S, v28.4S
srshr v19.4S, v7.4S, #13
srshr v28.4S, v24.4S, #13
st1 {v16.4S}, [x0], #16
srshr v29.4S, v25.4S, #13
st1 {v17.4S}, [x0], #16
srshr v30.4S, v26.4S, #13
st1 {v18.4S}, [x0], #16
srshr v31.4S, v27.4S, #13
st1 {v19.4S}, [x0], #16
st1 {v28.4S}, [x0], #16
shl v4.4S, v16.4S, #13
st1 {v29.4S}, [x0], #16
shl v5.4S, v17.4S, #13
st1 {v30.4S}, [x0], #16
shl v6.4S, v18.4S, #13
st1 {v31.4S}, [x0], #16
shl v7.4S, v19.4S, #13
shl v24.4S, v28.4S, #13
sub v16.4S, v0.4S, v4.4S
shl v25.4S, v29.4S, #13
sub v17.4S, v1.4S, v5.4S
shl v26.4S, v30.4S, #13
sub v18.4S, v2.4S, v6.4S
shl v27.4S, v31.4S, #13
sub v19.4S, v3.4S, v7.4S
sub v28.4S, v20.4S, v24.4S
st1 {v16.4S}, [x1], #16
sub v29.4S, v21.4S, v25.4S
st1 {v17.4S}, [x1], #16
sub v30.4S, v22.4S, v26.4S
st1 {v18.4S}, [x1], #16
sub v31.4S, v23.4S, v27.4S
st1 {v19.4S}, [x1], #16
sub x16, x16, #1
cbnz x16, _poly_power2round_loop
st1 {v28.4S}, [x1], #16
st1 {v29.4S}, [x1], #16
st1 {v30.4S}, [x1], #16
st1 {v31.4S}, [x1], #16
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_add
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_add
PQCLEAN_MLDSA44_AARCH64__asm_poly_add:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_add:
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
add v16.4S, v0.4S, v4.4S
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
add v17.4S, v1.4S, v5.4S
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
add v18.4S, v2.4S, v6.4S
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
add v19.4S, v3.4S, v7.4S
mov x16, #15
_poly_add_loop:
st1 {v16.4S}, [x0], #16
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
add v16.4S, v0.4S, v4.4S
st1 {v17.4S}, [x0], #16
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
add v17.4S, v1.4S, v5.4S
st1 {v18.4S}, [x0], #16
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
add v18.4S, v2.4S, v6.4S
st1 {v19.4S}, [x0], #16
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
add v19.4S, v3.4S, v7.4S
sub x16, x16, #1
cbnz x16, _poly_add_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_sub
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_sub
PQCLEAN_MLDSA44_AARCH64__asm_poly_sub:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_sub:
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
sub v16.4S, v0.4S, v4.4S
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
sub v17.4S, v1.4S, v5.4S
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
sub v18.4S, v2.4S, v6.4S
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
sub v19.4S, v3.4S, v7.4S
mov x16, #15
_poly_sub_loop:
st1 {v16.4S}, [x0], #16
ld1 {v0.4S}, [x1], #16
ld1 {v4.4S}, [x2], #16
sub v16.4S, v0.4S, v4.4S
st1 {v17.4S}, [x0], #16
ld1 {v1.4S}, [x1], #16
ld1 {v5.4S}, [x2], #16
sub v17.4S, v1.4S, v5.4S
st1 {v18.4S}, [x0], #16
ld1 {v2.4S}, [x1], #16
ld1 {v6.4S}, [x2], #16
sub v18.4S, v2.4S, v6.4S
st1 {v19.4S}, [x0], #16
ld1 {v3.4S}, [x1], #16
ld1 {v7.4S}, [x2], #16
sub v19.4S, v3.4S, v7.4S
sub x16, x16, #1
cbnz x16, _poly_sub_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_shiftl
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_shiftl
PQCLEAN_MLDSA44_AARCH64__asm_poly_shiftl:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_shiftl:
add x1, x0, #0
ld1 { v0.4S}, [x1], #16
shl v16.4S, v0.4S, #13
ld1 { v1.4S}, [x1], #16
shl v17.4S, v1.4S, #13
ld1 { v2.4S}, [x1], #16
shl v18.4S, v2.4S, #13
ld1 { v3.4S}, [x1], #16
shl v19.4S, v3.4S, #13
ld1 { v4.4S}, [x1], #16
shl v20.4S, v4.4S, #13
ld1 { v5.4S}, [x1], #16
shl v21.4S, v5.4S, #13
ld1 { v6.4S}, [x1], #16
shl v22.4S, v6.4S, #13
ld1 { v7.4S}, [x1], #16
shl v23.4S, v7.4S, #13
mov x16, #7
_poly_shiftl_loop:
st1 {v16.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
shl v16.4S, v0.4S, #13
st1 {v17.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
shl v17.4S, v1.4S, #13
st1 {v18.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
shl v18.4S, v2.4S, #13
st1 {v19.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
shl v19.4S, v3.4S, #13
st1 {v20.4S}, [x0], #16
ld1 { v4.4S}, [x1], #16
shl v20.4S, v4.4S, #13
st1 {v21.4S}, [x0], #16
ld1 { v5.4S}, [x1], #16
shl v21.4S, v5.4S, #13
st1 {v22.4S}, [x0], #16
ld1 { v6.4S}, [x1], #16
shl v22.4S, v6.4S, #13
st1 {v23.4S}, [x0], #16
ld1 { v7.4S}, [x1], #16
shl v23.4S, v7.4S, #13
sub x16, x16, #1
cbnz x16, _poly_shiftl_loop
st1 {v16.4S}, [x0], #16
st1 {v17.4S}, [x0], #16
st1 {v18.4S}, [x0], #16
st1 {v19.4S}, [x0], #16
st1 {v20.4S}, [x0], #16
st1 {v21.4S}, [x0], #16
st1 {v22.4S}, [x0], #16
st1 {v23.4S}, [x0], #16
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_poly_pointwise_montgomery
.global _PQCLEAN_MLDSA44_AARCH64__asm_poly_pointwise_montgomery
PQCLEAN_MLDSA44_AARCH64__asm_poly_pointwise_montgomery:
_PQCLEAN_MLDSA44_AARCH64__asm_poly_pointwise_montgomery:
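/* coefficient-wise Montgomery multiplication x0 = x1 * x2; [x3] is expected to hold Q and [x3, #4] the Montgomery multiplier: p = a*b (64-bit), m = lo32(p) * multiplier, result = hi32(p + m*Q) */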
push_all
ldr w20, [x3, #0]
ldr w21, [x3, #4]
dup v30.4S, w20
dup v31.4S, w21
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
mov x16, #15
_poly_pointwise_montgomery_loop:
st1 {v24.4S}, [x0], #16
ld1 { v0.4S}, [x1], #16
st1 {v25.4S}, [x0], #16
ld1 { v1.4S}, [x1], #16
st1 {v26.4S}, [x0], #16
ld1 { v2.4S}, [x1], #16
st1 {v27.4S}, [x0], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
sub x16, x16, #1
cbnz x16, _poly_pointwise_montgomery_loop
st1 {v24.4S}, [x0], #16
st1 {v25.4S}, [x0], #16
st1 {v26.4S}, [x0], #16
st1 {v27.4S}, [x0], #16
pop_all
ret
.align 2
.global PQCLEAN_MLDSA44_AARCH64__asm_polyvecl_pointwise_acc_montgomery
.global _PQCLEAN_MLDSA44_AARCH64__asm_polyvecl_pointwise_acc_montgomery
PQCLEAN_MLDSA44_AARCH64__asm_polyvecl_pointwise_acc_montgomery:
_PQCLEAN_MLDSA44_AARCH64__asm_polyvecl_pointwise_acc_montgomery:
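/* sum of L pointwise products: polynomial i of each vector lives at x1 + i*1024 and x2 + i*1024 (L chosen via the preprocessor below); products are accumulated in 64-bit lanes and Montgomery-reduced once, using Q and the multiplier at [x3] */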
push_all
ldr w20, [x3, #0]
ldr w21, [x3, #4]
add x5, x1, #1024*1
add x6, x2, #1024*1
add x7, x1, #1024*2
add x8, x2, #1024*2
add x9, x1, #1024*3
add x10, x2, #1024*3
#if L > 4
add x11, x1, #1024*4
add x12, x2, #1024*4
#endif
#if L > 5
add x13, x11, #1024*1
add x14, x12, #1024*1
add x15, x11, #1024*2
add x19, x12, #1024*2
#endif
dup v30.4S, w20
dup v31.4S, w21
ld1 { v0.4S}, [x1], #16
ld1 { v1.4S}, [x1], #16
ld1 { v2.4S}, [x1], #16
ld1 { v3.4S}, [x1], #16
ld1 { v4.4S}, [x2], #16
ld1 { v5.4S}, [x2], #16
ld1 { v6.4S}, [x2], #16
ld1 { v7.4S}, [x2], #16
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x5], #16
ld1 { v4.4S}, [ x6], #16
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x5], #16
ld1 { v5.4S}, [ x6], #16
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x5], #16
ld1 { v6.4S}, [ x6], #16
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x5], #16
ld1 { v7.4S}, [ x6], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x7], #16
ld1 { v4.4S}, [ x8], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x7], #16
ld1 { v5.4S}, [ x8], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x7], #16
ld1 { v6.4S}, [ x8], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x7], #16
ld1 { v7.4S}, [ x8], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x9], #16
ld1 { v4.4S}, [x10], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x9], #16
ld1 { v5.4S}, [x10], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x9], #16
ld1 { v6.4S}, [x10], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x9], #16
ld1 { v7.4S}, [x10], #16
#if L > 4
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x11], #16
ld1 { v4.4S}, [x12], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x11], #16
ld1 { v5.4S}, [x12], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x11], #16
ld1 { v6.4S}, [x12], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x11], #16
ld1 { v7.4S}, [x12], #16
#endif
#if L > 5
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x13], #16
ld1 { v4.4S}, [x14], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x13], #16
ld1 { v5.4S}, [x14], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x13], #16
ld1 { v6.4S}, [x14], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x13], #16
ld1 { v7.4S}, [x14], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x15], #16
ld1 { v4.4S}, [x19], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x15], #16
ld1 { v5.4S}, [x19], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x15], #16
ld1 { v6.4S}, [x19], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x15], #16
ld1 { v7.4S}, [x19], #16
#endif
mov x16, #15
_polyvecl_pointwise_acc_montgomery_loop:
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
ld1 { v0.4S}, [x1], #16
uzp1 v21.4S, v13.4S, v17.4S
ld1 { v1.4S}, [x1], #16
uzp1 v22.4S, v14.4S, v18.4S
ld1 { v2.4S}, [x1], #16
uzp1 v23.4S, v15.4S, v19.4S
ld1 { v3.4S}, [x1], #16
mul v24.4S, v20.4S, v31.4S
ld1 { v4.4S}, [x2], #16
mul v25.4S, v21.4S, v31.4S
ld1 { v5.4S}, [x2], #16
mul v26.4S, v22.4S, v31.4S
ld1 { v6.4S}, [x2], #16
mul v27.4S, v23.4S, v31.4S
ld1 { v7.4S}, [x2], #16
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
smull v12.2D, v0.2S, v4.2S
smull2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x5], #16
st1 {v24.4S}, [x0], #16
ld1 { v4.4S}, [ x6], #16
smull v13.2D, v1.2S, v5.2S
smull2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x5], #16
st1 {v25.4S}, [x0], #16
ld1 { v5.4S}, [ x6], #16
smull v14.2D, v2.2S, v6.2S
smull2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x5], #16
st1 {v26.4S}, [x0], #16
ld1 { v6.4S}, [ x6], #16
smull v15.2D, v3.2S, v7.2S
smull2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x5], #16
st1 {v27.4S}, [x0], #16
ld1 { v7.4S}, [ x6], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x7], #16
ld1 { v4.4S}, [ x8], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x7], #16
ld1 { v5.4S}, [ x8], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x7], #16
ld1 { v6.4S}, [ x8], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x7], #16
ld1 { v7.4S}, [ x8], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [ x9], #16
ld1 { v4.4S}, [x10], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [ x9], #16
ld1 { v5.4S}, [x10], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [ x9], #16
ld1 { v6.4S}, [x10], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [ x9], #16
ld1 { v7.4S}, [x10], #16
#if L > 4
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x11], #16
ld1 { v4.4S}, [x12], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x11], #16
ld1 { v5.4S}, [x12], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x11], #16
ld1 { v6.4S}, [x12], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x11], #16
ld1 { v7.4S}, [x12], #16
#endif
#if L > 5
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x13], #16
ld1 { v4.4S}, [x14], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x13], #16
ld1 { v5.4S}, [x14], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x13], #16
ld1 { v6.4S}, [x14], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x13], #16
ld1 { v7.4S}, [x14], #16
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
ld1 { v0.4S}, [x15], #16
ld1 { v4.4S}, [x19], #16
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
ld1 { v1.4S}, [x15], #16
ld1 { v5.4S}, [x19], #16
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
ld1 { v2.4S}, [x15], #16
ld1 { v6.4S}, [x19], #16
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
ld1 { v3.4S}, [x15], #16
ld1 { v7.4S}, [x19], #16
#endif
sub x16, x16, #1
cbnz x16, _polyvecl_pointwise_acc_montgomery_loop
smlal v12.2D, v0.2S, v4.2S
smlal2 v16.2D, v0.4S, v4.4S
smlal v13.2D, v1.2S, v5.2S
smlal2 v17.2D, v1.4S, v5.4S
smlal v14.2D, v2.2S, v6.2S
smlal2 v18.2D, v2.4S, v6.4S
smlal v15.2D, v3.2S, v7.2S
smlal2 v19.2D, v3.4S, v7.4S
uzp1 v20.4S, v12.4S, v16.4S
uzp1 v21.4S, v13.4S, v17.4S
uzp1 v22.4S, v14.4S, v18.4S
uzp1 v23.4S, v15.4S, v19.4S
mul v24.4S, v20.4S, v31.4S
mul v25.4S, v21.4S, v31.4S
mul v26.4S, v22.4S, v31.4S
mul v27.4S, v23.4S, v31.4S
smlal v12.2D, v24.2S, v30.2S
smlal2 v16.2D, v24.4S, v30.4S
smlal v13.2D, v25.2S, v30.2S
smlal2 v17.2D, v25.4S, v30.4S
smlal v14.2D, v26.2S, v30.2S
smlal2 v18.2D, v26.4S, v30.4S
smlal v15.2D, v27.2S, v30.2S
smlal2 v19.2D, v27.4S, v30.4S
uzp2 v24.4S, v12.4S, v16.4S
uzp2 v25.4S, v13.4S, v17.4S
uzp2 v26.4S, v14.4S, v18.4S
uzp2 v27.4S, v15.4S, v19.4S
st1 {v24.4S}, [x0], #16
st1 {v25.4S}, [x0], #16
st1 {v26.4S}, [x0], #16
st1 {v27.4S}, [x0], #16
pop_all
ret
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/sphincs-shake-256s-simple/aarch64/f1600x2.s
# From https://github.com/bwesterb/armed-keccak
.macro round
# Execute theta, but without xoring into the state yet.
# Compute parities p[i] = a[i] ^ a[5+i] ^ ... ^ a[20+i].
eor3.16b v25, v0, v5, v10
eor3.16b v26, v1, v6, v11
eor3.16b v27, v2, v7, v12
eor3.16b v28, v3, v8, v13
eor3.16b v29, v4, v9, v14
eor3.16b v25, v25, v15, v20
eor3.16b v26, v26, v16, v21
eor3.16b v27, v27, v17, v22
eor3.16b v28, v28, v18, v23
eor3.16b v29, v29, v19, v24
# d[0] = rotl(p[1], 1) ^ p[4]
rax1.2d v30, v29, v26
# d[3] = rotl(p[4], 1) ^ p[2]
rax1.2d v29, v27, v29
# d[1] = rotl(p[2], 1) ^ p[0]
rax1.2d v27, v25, v27
# d[4] = rotl(p[0], 1) ^ p[3]
rax1.2d v25, v28, v25
# d[2] = rotl(p[3], 1) ^ p[1]
rax1.2d v28, v26, v28
# Xor parities from step theta into the state at the same time
# as executing rho and pi.
eor.16b v0, v0, v30
mov.16b v31, v1
xar.2d v1, v6, v27, 20
xar.2d v6, v9, v25, 44
xar.2d v9, v22, v28, 3
xar.2d v22, v14, v25, 25
xar.2d v14, v20, v30, 46
xar.2d v20, v2, v28, 2
xar.2d v2, v12, v28, 21
xar.2d v12, v13, v29, 39
xar.2d v13, v19, v25, 56
xar.2d v19, v23, v29, 8
xar.2d v23, v15, v30, 23
xar.2d v15, v4, v25, 37
xar.2d v4, v24, v25, 50
xar.2d v24, v21, v27, 62
xar.2d v21, v8, v29, 9
xar.2d v8, v16, v27, 19
xar.2d v16, v5, v30, 28
xar.2d v5, v3, v29, 36
xar.2d v3, v18, v29, 43
xar.2d v18, v17, v28, 49
xar.2d v17, v11, v27, 54
xar.2d v11, v7, v28, 58
xar.2d v7, v10, v30, 61
xar.2d v10, v31, v27, 63
# Chi
bcax.16b v25, v0, v2, v1
bcax.16b v26, v1, v3, v2
bcax.16b v2, v2, v4, v3
bcax.16b v3, v3, v0, v4
bcax.16b v4, v4, v1, v0
mov.16b v0, v25
mov.16b v1, v26
bcax.16b v25, v5, v7, v6
bcax.16b v26, v6, v8, v7
bcax.16b v7, v7, v9, v8
bcax.16b v8, v8, v5, v9
bcax.16b v9, v9, v6, v5
mov.16b v5, v25
mov.16b v6, v26
bcax.16b v25, v10, v12, v11
bcax.16b v26, v11, v13, v12
bcax.16b v12, v12, v14, v13
bcax.16b v13, v13, v10, v14
bcax.16b v14, v14, v11, v10
mov.16b v10, v25
mov.16b v11, v26
bcax.16b v25, v15, v17, v16
bcax.16b v26, v16, v18, v17
bcax.16b v17, v17, v19, v18
bcax.16b v18, v18, v15, v19
bcax.16b v19, v19, v16, v15
mov.16b v15, v25
mov.16b v16, v26
bcax.16b v25, v20, v22, v21
bcax.16b v26, v21, v23, v22
bcax.16b v22, v22, v24, v23
bcax.16b v23, v23, v20, v24
bcax.16b v24, v24, v21, v20
mov.16b v20, v25
mov.16b v21, v26
# iota
ld1r {v25.2d}, [x1], #8
eor.16b v0, v0, v25
.endm
.align 4
.global __f1600x2
__f1600x2:
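# x0: two interleaved Keccak-f[1600] states (25 lanes, 2 states per vector register); x1: table of 24 round constants consumed by iota.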
stp d8, d9, [sp,#-16]!
stp d10, d11, [sp,#-16]!
stp d12, d13, [sp,#-16]!
stp d14, d15, [sp,#-16]!
mov x2, x0
mov x3, #24
ld1.2d {v0, v1, v2, v3}, [x0], #64
ld1.2d {v4, v5, v6, v7}, [x0], #64
ld1.2d {v8, v9, v10, v11}, [x0], #64
ld1.2d {v12, v13, v14, v15}, [x0], #64
ld1.2d {v16, v17, v18, v19}, [x0], #64
ld1.2d {v20, v21, v22, v23}, [x0], #64
ld1.2d {v24}, [x0]
loop:
round
subs x3, x3, #1
cbnz x3, loop
mov x0, x2
st1.2d {v0, v1, v2, v3}, [x0], #64
st1.2d {v4, v5, v6, v7}, [x0], #64
st1.2d {v8, v9, v10, v11}, [x0], #64
st1.2d {v12, v13, v14, v15}, [x0], #64
st1.2d {v16, v17, v18, v19}, [x0], #64
st1.2d {v20, v21, v22, v23}, [x0], #64
st1.2d {v24}, [x0]
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ret lr
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/sphincs-shake-128s-simple/aarch64/f1600x2.s
# From https://github.com/bwesterb/armed-keccak
.macro round
# Execute theta, but without xoring into the state yet.
# Compute parities p[i] = a[i] ^ a[5+i] ^ ... ^ a[20+i].
eor3.16b v25, v0, v5, v10
eor3.16b v26, v1, v6, v11
eor3.16b v27, v2, v7, v12
eor3.16b v28, v3, v8, v13
eor3.16b v29, v4, v9, v14
eor3.16b v25, v25, v15, v20
eor3.16b v26, v26, v16, v21
eor3.16b v27, v27, v17, v22
eor3.16b v28, v28, v18, v23
eor3.16b v29, v29, v19, v24
# d[0] = rotl(p[1], 1) ^ p[4]
rax1.2d v30, v29, v26
# d[3] = rotl(p[4], 1) ^ p[2]
rax1.2d v29, v27, v29
# d[1] = rotl(p[2], 1) ^ p[0]
rax1.2d v27, v25, v27
# d[4] = rotl(p[0], 1) ^ p[3]
rax1.2d v25, v28, v25
# d[2] = rotl(p[3], 1) ^ p[1]
rax1.2d v28, v26, v28
# Xor parities from step theta into the state at the same time
# as executing rho and pi.
eor.16b v0, v0, v30
mov.16b v31, v1
xar.2d v1, v6, v27, 20
xar.2d v6, v9, v25, 44
xar.2d v9, v22, v28, 3
xar.2d v22, v14, v25, 25
xar.2d v14, v20, v30, 46
xar.2d v20, v2, v28, 2
xar.2d v2, v12, v28, 21
xar.2d v12, v13, v29, 39
xar.2d v13, v19, v25, 56
xar.2d v19, v23, v29, 8
xar.2d v23, v15, v30, 23
xar.2d v15, v4, v25, 37
xar.2d v4, v24, v25, 50
xar.2d v24, v21, v27, 62
xar.2d v21, v8, v29, 9
xar.2d v8, v16, v27, 19
xar.2d v16, v5, v30, 28
xar.2d v5, v3, v29, 36
xar.2d v3, v18, v29, 43
xar.2d v18, v17, v28, 49
xar.2d v17, v11, v27, 54
xar.2d v11, v7, v28, 58
xar.2d v7, v10, v30, 61
xar.2d v10, v31, v27, 63
# Chi
bcax.16b v25, v0, v2, v1
bcax.16b v26, v1, v3, v2
bcax.16b v2, v2, v4, v3
bcax.16b v3, v3, v0, v4
bcax.16b v4, v4, v1, v0
mov.16b v0, v25
mov.16b v1, v26
bcax.16b v25, v5, v7, v6
bcax.16b v26, v6, v8, v7
bcax.16b v7, v7, v9, v8
bcax.16b v8, v8, v5, v9
bcax.16b v9, v9, v6, v5
mov.16b v5, v25
mov.16b v6, v26
bcax.16b v25, v10, v12, v11
bcax.16b v26, v11, v13, v12
bcax.16b v12, v12, v14, v13
bcax.16b v13, v13, v10, v14
bcax.16b v14, v14, v11, v10
mov.16b v10, v25
mov.16b v11, v26
bcax.16b v25, v15, v17, v16
bcax.16b v26, v16, v18, v17
bcax.16b v17, v17, v19, v18
bcax.16b v18, v18, v15, v19
bcax.16b v19, v19, v16, v15
mov.16b v15, v25
mov.16b v16, v26
bcax.16b v25, v20, v22, v21
bcax.16b v26, v21, v23, v22
bcax.16b v22, v22, v24, v23
bcax.16b v23, v23, v20, v24
bcax.16b v24, v24, v21, v20
mov.16b v20, v25
mov.16b v21, v26
# iota
ld1r {v25.2d}, [x1], #8
eor.16b v0, v0, v25
.endm
.align 4
.global __f1600x2
__f1600x2:
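# x0: two interleaved Keccak-f[1600] states (25 lanes, 2 states per vector register); x1: table of 24 round constants consumed by iota.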
stp d8, d9, [sp,#-16]!
stp d10, d11, [sp,#-16]!
stp d12, d13, [sp,#-16]!
stp d14, d15, [sp,#-16]!
mov x2, x0
mov x3, #24
ld1.2d {v0, v1, v2, v3}, [x0], #64
ld1.2d {v4, v5, v6, v7}, [x0], #64
ld1.2d {v8, v9, v10, v11}, [x0], #64
ld1.2d {v12, v13, v14, v15}, [x0], #64
ld1.2d {v16, v17, v18, v19}, [x0], #64
ld1.2d {v20, v21, v22, v23}, [x0], #64
ld1.2d {v24}, [x0]
loop:
round
subs x3, x3, #1
cbnz x3, loop
mov x0, x2
st1.2d {v0, v1, v2, v3}, [x0], #64
st1.2d {v4, v5, v6, v7}, [x0], #64
st1.2d {v8, v9, v10, v11}, [x0], #64
st1.2d {v12, v13, v14, v15}, [x0], #64
st1.2d {v16, v17, v18, v19}, [x0], #64
st1.2d {v20, v21, v22, v23}, [x0], #64
st1.2d {v24}, [x0]
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ret lr
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/sphincs-shake-192f-simple/aarch64/f1600x2.s
# From https://github.com/bwesterb/armed-keccak
.macro round
# Execute theta, but without xoring into the state yet.
# Compute parities p[i] = a[i] ^ a[5+i] ^ ... ^ a[20+i].
eor3.16b v25, v0, v5, v10
eor3.16b v26, v1, v6, v11
eor3.16b v27, v2, v7, v12
eor3.16b v28, v3, v8, v13
eor3.16b v29, v4, v9, v14
eor3.16b v25, v25, v15, v20
eor3.16b v26, v26, v16, v21
eor3.16b v27, v27, v17, v22
eor3.16b v28, v28, v18, v23
eor3.16b v29, v29, v19, v24
# d[0] = rotl(p[1], 1) ^ p[4]
rax1.2d v30, v29, v26
# d[3] = rotl(p[4], 1) ^ p[2]
rax1.2d v29, v27, v29
# d[1] = rotl(p[2], 1) ^ p[0]
rax1.2d v27, v25, v27
# d[4] = rotl(p[0], 1) ^ p[3]
rax1.2d v25, v28, v25
# d[2] = rotl(p[3], 1) ^ p[1]
rax1.2d v28, v26, v28
# Xor parities from step theta into the state at the same time
# as executing rho and pi.
eor.16b v0, v0, v30
mov.16b v31, v1
xar.2d v1, v6, v27, 20
xar.2d v6, v9, v25, 44
xar.2d v9, v22, v28, 3
xar.2d v22, v14, v25, 25
xar.2d v14, v20, v30, 46
xar.2d v20, v2, v28, 2
xar.2d v2, v12, v28, 21
xar.2d v12, v13, v29, 39
xar.2d v13, v19, v25, 56
xar.2d v19, v23, v29, 8
xar.2d v23, v15, v30, 23
xar.2d v15, v4, v25, 37
xar.2d v4, v24, v25, 50
xar.2d v24, v21, v27, 62
xar.2d v21, v8, v29, 9
xar.2d v8, v16, v27, 19
xar.2d v16, v5, v30, 28
xar.2d v5, v3, v29, 36
xar.2d v3, v18, v29, 43
xar.2d v18, v17, v28, 49
xar.2d v17, v11, v27, 54
xar.2d v11, v7, v28, 58
xar.2d v7, v10, v30, 61
xar.2d v10, v31, v27, 63
# Chi
bcax.16b v25, v0, v2, v1
bcax.16b v26, v1, v3, v2
bcax.16b v2, v2, v4, v3
bcax.16b v3, v3, v0, v4
bcax.16b v4, v4, v1, v0
mov.16b v0, v25
mov.16b v1, v26
bcax.16b v25, v5, v7, v6
bcax.16b v26, v6, v8, v7
bcax.16b v7, v7, v9, v8
bcax.16b v8, v8, v5, v9
bcax.16b v9, v9, v6, v5
mov.16b v5, v25
mov.16b v6, v26
bcax.16b v25, v10, v12, v11
bcax.16b v26, v11, v13, v12
bcax.16b v12, v12, v14, v13
bcax.16b v13, v13, v10, v14
bcax.16b v14, v14, v11, v10
mov.16b v10, v25
mov.16b v11, v26
bcax.16b v25, v15, v17, v16
bcax.16b v26, v16, v18, v17
bcax.16b v17, v17, v19, v18
bcax.16b v18, v18, v15, v19
bcax.16b v19, v19, v16, v15
mov.16b v15, v25
mov.16b v16, v26
bcax.16b v25, v20, v22, v21
bcax.16b v26, v21, v23, v22
bcax.16b v22, v22, v24, v23
bcax.16b v23, v23, v20, v24
bcax.16b v24, v24, v21, v20
mov.16b v20, v25
mov.16b v21, v26
# iota
ld1r {v25.2d}, [x1], #8
eor.16b v0, v0, v25
.endm
.align 4
.global __f1600x2
__f1600x2:
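# x0: two interleaved Keccak-f[1600] states (25 lanes, 2 states per vector register); x1: table of 24 round constants consumed by iota.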
stp d8, d9, [sp,#-16]!
stp d10, d11, [sp,#-16]!
stp d12, d13, [sp,#-16]!
stp d14, d15, [sp,#-16]!
mov x2, x0
mov x3, #24
ld1.2d {v0, v1, v2, v3}, [x0], #64
ld1.2d {v4, v5, v6, v7}, [x0], #64
ld1.2d {v8, v9, v10, v11}, [x0], #64
ld1.2d {v12, v13, v14, v15}, [x0], #64
ld1.2d {v16, v17, v18, v19}, [x0], #64
ld1.2d {v20, v21, v22, v23}, [x0], #64
ld1.2d {v24}, [x0]
loop:
round
subs x3, x3, #1
cbnz x3, loop
mov x0, x2
st1.2d {v0, v1, v2, v3}, [x0], #64
st1.2d {v4, v5, v6, v7}, [x0], #64
st1.2d {v8, v9, v10, v11}, [x0], #64
st1.2d {v12, v13, v14, v15}, [x0], #64
st1.2d {v16, v17, v18, v19}, [x0], #64
st1.2d {v20, v21, v22, v23}, [x0], #64
st1.2d {v24}, [x0]
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ret lr
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/pqcrypto-mlkem-0.1.1/pqclean/crypto_sign/sphincs-shake-256f-simple/aarch64/f1600x2.s
# From https://github.com/bwesterb/armed-keccak
.macro round
# Execute theta, but without xoring into the state yet.
# Compute parities p[i] = a[i] ^ a[5+i] ^ ... ^ a[20+i].
eor3.16b v25, v0, v5, v10
eor3.16b v26, v1, v6, v11
eor3.16b v27, v2, v7, v12
eor3.16b v28, v3, v8, v13
eor3.16b v29, v4, v9, v14
eor3.16b v25, v25, v15, v20
eor3.16b v26, v26, v16, v21
eor3.16b v27, v27, v17, v22
eor3.16b v28, v28, v18, v23
eor3.16b v29, v29, v19, v24
# d[0] = rotl(p[1], 1) ^ p[4]
rax1.2d v30, v29, v26
# d[3] = rotl(p[4], 1) ^ p[2]
rax1.2d v29, v27, v29
# d[1] = rotl(p[2], 1) ^ p[0]
rax1.2d v27, v25, v27
# d[4] = rotl(p[0], 1) ^ p[3]
rax1.2d v25, v28, v25
# d[2] = rotl(p[3], 1) ^ p[1]
rax1.2d v28, v26, v28
# Xor parities from step theta into the state at the same time
# as executing rho and pi.
eor.16b v0, v0, v30
mov.16b v31, v1
xar.2d v1, v6, v27, 20
xar.2d v6, v9, v25, 44
xar.2d v9, v22, v28, 3
xar.2d v22, v14, v25, 25
xar.2d v14, v20, v30, 46
xar.2d v20, v2, v28, 2
xar.2d v2, v12, v28, 21
xar.2d v12, v13, v29, 39
xar.2d v13, v19, v25, 56
xar.2d v19, v23, v29, 8
xar.2d v23, v15, v30, 23
xar.2d v15, v4, v25, 37
xar.2d v4, v24, v25, 50
xar.2d v24, v21, v27, 62
xar.2d v21, v8, v29, 9
xar.2d v8, v16, v27, 19
xar.2d v16, v5, v30, 28
xar.2d v5, v3, v29, 36
xar.2d v3, v18, v29, 43
xar.2d v18, v17, v28, 49
xar.2d v17, v11, v27, 54
xar.2d v11, v7, v28, 58
xar.2d v7, v10, v30, 61
xar.2d v10, v31, v27, 63
# Chi
bcax.16b v25, v0, v2, v1
bcax.16b v26, v1, v3, v2
bcax.16b v2, v2, v4, v3
bcax.16b v3, v3, v0, v4
bcax.16b v4, v4, v1, v0
mov.16b v0, v25
mov.16b v1, v26
bcax.16b v25, v5, v7, v6
bcax.16b v26, v6, v8, v7
bcax.16b v7, v7, v9, v8
bcax.16b v8, v8, v5, v9
bcax.16b v9, v9, v6, v5
mov.16b v5, v25
mov.16b v6, v26
bcax.16b v25, v10, v12, v11
bcax.16b v26, v11, v13, v12
bcax.16b v12, v12, v14, v13
bcax.16b v13, v13, v10, v14
bcax.16b v14, v14, v11, v10
mov.16b v10, v25
mov.16b v11, v26
bcax.16b v25, v15, v17, v16
bcax.16b v26, v16, v18, v17
bcax.16b v17, v17, v19, v18
bcax.16b v18, v18, v15, v19
bcax.16b v19, v19, v16, v15
mov.16b v15, v25
mov.16b v16, v26
bcax.16b v25, v20, v22, v21
bcax.16b v26, v21, v23, v22
bcax.16b v22, v22, v24, v23
bcax.16b v23, v23, v20, v24
bcax.16b v24, v24, v21, v20
mov.16b v20, v25
mov.16b v21, v26
# iota
ld1r {v25.2d}, [x1], #8
eor.16b v0, v0, v25
.endm
.align 4
.global __f1600x2
__f1600x2:
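# x0: two interleaved Keccak-f[1600] states (25 lanes, 2 states per vector register); x1: table of 24 round constants consumed by iota.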
stp d8, d9, [sp,#-16]!
stp d10, d11, [sp,#-16]!
stp d12, d13, [sp,#-16]!
stp d14, d15, [sp,#-16]!
mov x2, x0
mov x3, #24
ld1.2d {v0, v1, v2, v3}, [x0], #64
ld1.2d {v4, v5, v6, v7}, [x0], #64
ld1.2d {v8, v9, v10, v11}, [x0], #64
ld1.2d {v12, v13, v14, v15}, [x0], #64
ld1.2d {v16, v17, v18, v19}, [x0], #64
ld1.2d {v20, v21, v22, v23}, [x0], #64
ld1.2d {v24}, [x0]
loop:
round
subs x3, x3, #1
cbnz x3, loop
mov x0, x2
st1.2d {v0, v1, v2, v3}, [x0], #64
st1.2d {v4, v5, v6, v7}, [x0], #64
st1.2d {v8, v9, v10, v11}, [x0], #64
st1.2d {v12, v13, v14, v15}, [x0], #64
st1.2d {v16, v17, v18, v19}, [x0], #64
st1.2d {v20, v21, v22, v23}, [x0], #64
st1.2d {v24}, [x0]
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ret lr
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.section __TEXT,__const
.align 5
Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
.globl _ChaCha20_ctr32_nohw
.private_extern _ChaCha20_ctr32_nohw
.align 5
_ChaCha20_ctr32_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
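// 10 iterations, each one column round plus one diagonal round = 20 ChaCha rounds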
subs x2,x2,#64
Loop:
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 4
Ltail:
add x2,x2,#64
Less_than_64:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
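// x2 now runs from -(bytes left) up to zero; it is incremented before
// the store, which the earlier x0 -= 1 compensates for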
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail
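// wipe the key-stream scratch copy from the stack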
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _ChaCha20_ctr32_neon
.private_extern _ChaCha20_ctr32_neon
.align 5
_ChaCha20_ctr32_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
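// each outer iteration yields 4 blocks: three in NEON (v0-v3, v4-v7,
// v16-v19) and one in general-purpose registers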
Loop_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b Last_neon
Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b Last_neon
Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b Last_neon
.align 4
Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
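// each outer iteration yields 8 blocks: six NEON states (v0-v23) plus
// two scalar blocks, one emitted after each of the upper/lower half-loops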
Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b Loop_outer
Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesni-gcm-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.p2align 5
_aesni_ctr32_ghash_6x:
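// Stitched loop: encrypts six counter blocks with AES while folding the
// previous six ciphertext blocks into the GHASH accumulator.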
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
vmovdqu 0-128(%rcx),%xmm15
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpaddb %xmm2,%xmm11,%xmm12
vpaddb %xmm2,%xmm12,%xmm13
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm15,%xmm1,%xmm9
vmovdqu %xmm4,16+8(%rsp)
jmp L$oop6x
.p2align 5
L$oop6x:
addl $100663296,%ebx
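// 6<<24 adds 6 to the counter's low byte (byte-swapped into bits 31:24);
// a carry means vpaddb would wrap, so take the byte-swapping slow path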
jc L$handle_ctr32
vmovdqu 0-32(%r9),%xmm3
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm15,%xmm10,%xmm10
vpxor %xmm15,%xmm11,%xmm11
L$resume_ctr32:
vmovdqu %xmm1,(%r8)
vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5
vpxor %xmm15,%xmm12,%xmm12
vmovups 16-128(%rcx),%xmm2
vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6
xorq %r12,%r12
cmpq %r14,%r15
vaesenc %xmm2,%xmm9,%xmm9
vmovdqu 48+8(%rsp),%xmm0
vpxor %xmm15,%xmm13,%xmm13
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1
vaesenc %xmm2,%xmm10,%xmm10
vpxor %xmm15,%xmm14,%xmm14
setnc %r12b
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vmovdqu 16-32(%r9),%xmm3
negq %r12
vaesenc %xmm2,%xmm12,%xmm12
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5
vpxor %xmm4,%xmm8,%xmm8
vaesenc %xmm2,%xmm13,%xmm13
vpxor %xmm5,%xmm1,%xmm4
andq $0x60,%r12
vmovups 32-128(%rcx),%xmm15
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1
vaesenc %xmm2,%xmm14,%xmm14
vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2
leaq (%r14,%r12,1),%r14
vaesenc %xmm15,%xmm9,%xmm9
vpxor 16+8(%rsp),%xmm8,%xmm8
vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3
vmovdqu 64+8(%rsp),%xmm0
vaesenc %xmm15,%xmm10,%xmm10
movbeq 88(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 80(%r14),%r12
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,32+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,40+8(%rsp)
vmovdqu 48-32(%r9),%xmm5
vaesenc %xmm15,%xmm14,%xmm14
vmovups 48-128(%rcx),%xmm15
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm3,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3
vaesenc %xmm15,%xmm11,%xmm11
vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5
vmovdqu 80+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vpxor %xmm1,%xmm4,%xmm4
vmovdqu 64-32(%r9),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vmovups 64-128(%rcx),%xmm15
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
vaesenc %xmm15,%xmm10,%xmm10
movbeq 72(%r14),%r13
vpxor %xmm5,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
movbeq 64(%r14),%r12
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1
vmovdqu 96+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,48+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,56+8(%rsp)
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 96-32(%r9),%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vmovups 80-128(%rcx),%xmm15
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5
vaesenc %xmm15,%xmm10,%xmm10
movbeq 56(%r14),%r13
vpxor %xmm1,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1
vpxor 112+8(%rsp),%xmm8,%xmm8
vaesenc %xmm15,%xmm11,%xmm11
movbeq 48(%r14),%r12
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,64+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,72+8(%rsp)
vpxor %xmm3,%xmm4,%xmm4
vmovdqu 112-32(%r9),%xmm3
vaesenc %xmm15,%xmm14,%xmm14
vmovups 96-128(%rcx),%xmm15
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1
vaesenc %xmm15,%xmm10,%xmm10
movbeq 40(%r14),%r13
vpxor %xmm2,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
movbeq 32(%r14),%r12
vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,80+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,88+8(%rsp)
vpxor %xmm5,%xmm6,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor %xmm1,%xmm6,%xmm6
vmovups 112-128(%rcx),%xmm15
vpslldq $8,%xmm6,%xmm5
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 16(%r11),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm8,%xmm7,%xmm7
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm5,%xmm4,%xmm4
movbeq 24(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 16(%r14),%r12
vpalignr $8,%xmm4,%xmm4,%xmm0
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
movq %r13,96+8(%rsp)
vaesenc %xmm15,%xmm12,%xmm12
movq %r12,104+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
vmovups 128-128(%rcx),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vmovups 144-128(%rcx),%xmm15
vaesenc %xmm1,%xmm10,%xmm10
vpsrldq $8,%xmm6,%xmm6
vaesenc %xmm1,%xmm11,%xmm11
vpxor %xmm6,%xmm7,%xmm7
vaesenc %xmm1,%xmm12,%xmm12
vpxor %xmm0,%xmm4,%xmm4
movbeq 8(%r14),%r13
vaesenc %xmm1,%xmm13,%xmm13
movbeq 0(%r14),%r12
vaesenc %xmm1,%xmm14,%xmm14
vmovups 160-128(%rcx),%xmm1
cmpl $11,%r10d
jb L$enc_tail
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 176-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 192-128(%rcx),%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 208-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 224-128(%rcx),%xmm1
jmp L$enc_tail
.p2align 5
L$handle_ctr32:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vmovdqu 0-32(%r9),%xmm3
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm15,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm15,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpshufb %xmm0,%xmm14,%xmm14
vpshufb %xmm0,%xmm1,%xmm1
jmp L$resume_ctr32
.p2align 5
L$enc_tail:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
vpalignr $8,%xmm4,%xmm4,%xmm8
vaesenc %xmm15,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
vpxor 0(%rdi),%xmm1,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
vpxor 16(%rdi),%xmm1,%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vpxor 32(%rdi),%xmm1,%xmm5
vaesenc %xmm15,%xmm13,%xmm13
vpxor 48(%rdi),%xmm1,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor 64(%rdi),%xmm1,%xmm7
vpxor 80(%rdi),%xmm1,%xmm3
vmovdqu (%r8),%xmm1
vaesenclast %xmm2,%xmm9,%xmm9
vmovdqu 32(%r11),%xmm2
vaesenclast %xmm0,%xmm10,%xmm10
vpaddb %xmm2,%xmm1,%xmm0
movq %r13,112+8(%rsp)
leaq 96(%rdi),%rdi
prefetcht0 512(%rdi)
prefetcht0 576(%rdi)
vaesenclast %xmm5,%xmm11,%xmm11
vpaddb %xmm2,%xmm0,%xmm5
movq %r12,120+8(%rsp)
leaq 96(%rsi),%rsi
vmovdqu 0-128(%rcx),%xmm15
vaesenclast %xmm6,%xmm12,%xmm12
vpaddb %xmm2,%xmm5,%xmm6
vaesenclast %xmm7,%xmm13,%xmm13
vpaddb %xmm2,%xmm6,%xmm7
vaesenclast %xmm3,%xmm14,%xmm14
vpaddb %xmm2,%xmm7,%xmm3
addq $0x60,%rax
subq $0x6,%rdx
jc L$6x_done
vmovups %xmm9,-96(%rsi)
vpxor %xmm15,%xmm1,%xmm9
vmovups %xmm10,-80(%rsi)
vmovdqa %xmm0,%xmm10
vmovups %xmm11,-64(%rsi)
vmovdqa %xmm5,%xmm11
vmovups %xmm12,-48(%rsi)
vmovdqa %xmm6,%xmm12
vmovups %xmm13,-32(%rsi)
vmovdqa %xmm7,%xmm13
vmovups %xmm14,-16(%rsi)
vmovdqa %xmm3,%xmm14
vmovdqu 32+8(%rsp),%xmm7
jmp L$oop6x
L$6x_done:
vpxor 16+8(%rsp),%xmm8,%xmm8
vpxor %xmm4,%xmm8,%xmm8
ret
.globl _aesni_gcm_decrypt
.private_extern _aesni_gcm_decrypt
.p2align 5
_aesni_gcm_decrypt:
_CET_ENDBR
xorq %rax,%rax
cmpq $0x60,%rdx
jb L$gcm_dec_abort
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
movq 16(%rbp),%r12
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
vmovdqu (%r12),%xmm8
andq $-128,%rsp
vmovdqu (%r11),%xmm0
leaq 128(%rcx),%rcx
leaq 32(%r9),%r9
movl 240-128(%rcx),%r10d
vpshufb %xmm0,%xmm8,%xmm8
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$dec_no_key_aliasing
cmpq $768,%r15
jnc L$dec_no_key_aliasing
subq %r15,%rsp
L$dec_no_key_aliasing:
vmovdqu 80(%rdi),%xmm7
movq %rdi,%r14
vmovdqu 64(%rdi),%xmm4
leaq -192(%rdi,%rdx,1),%r15
vmovdqu 48(%rdi),%xmm5
shrq $4,%rdx
xorq %rax,%rax
vmovdqu 32(%rdi),%xmm6
vpshufb %xmm0,%xmm7,%xmm7
vmovdqu 16(%rdi),%xmm2
vpshufb %xmm0,%xmm4,%xmm4
vmovdqu (%rdi),%xmm3
vpshufb %xmm0,%xmm5,%xmm5
vmovdqu %xmm4,48(%rsp)
vpshufb %xmm0,%xmm6,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm2,%xmm2
vmovdqu %xmm6,80(%rsp)
vpshufb %xmm0,%xmm3,%xmm3
vmovdqu %xmm2,96(%rsp)
vmovdqu %xmm3,112(%rsp)
call _aesni_ctr32_ghash_6x
movq 16(%rbp),%r12
vmovups %xmm9,-96(%rsi)
vmovups %xmm10,-80(%rsi)
vmovups %xmm11,-64(%rsi)
vmovups %xmm12,-48(%rsi)
vmovups %xmm13,-32(%rsi)
vmovups %xmm14,-16(%rsi)
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_dec_abort:
ret
.p2align 5
_aesni_ctr32_6x:
vmovdqu 0-128(%rcx),%xmm4
vmovdqu 32(%r11),%xmm2
leaq -1(%r10),%r13
vmovups 16-128(%rcx),%xmm15
leaq 32-128(%rcx),%r12
vpxor %xmm4,%xmm1,%xmm9
addl $100663296,%ebx
jc L$handle_ctr32_2
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddb %xmm2,%xmm11,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddb %xmm2,%xmm12,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32
.p2align 4
L$oop_ctr32:
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vmovups (%r12),%xmm15
leaq 16(%r12),%r12
decl %r13d
jnz L$oop_ctr32
vmovdqu (%r12),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor 0(%rdi),%xmm3,%xmm4
vaesenc %xmm15,%xmm10,%xmm10
vpxor 16(%rdi),%xmm3,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
vpxor 32(%rdi),%xmm3,%xmm6
vaesenc %xmm15,%xmm12,%xmm12
vpxor 48(%rdi),%xmm3,%xmm8
vaesenc %xmm15,%xmm13,%xmm13
vpxor 64(%rdi),%xmm3,%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vpxor 80(%rdi),%xmm3,%xmm3
leaq 96(%rdi),%rdi
vaesenclast %xmm4,%xmm9,%xmm9
vaesenclast %xmm5,%xmm10,%xmm10
vaesenclast %xmm6,%xmm11,%xmm11
vaesenclast %xmm8,%xmm12,%xmm12
vaesenclast %xmm2,%xmm13,%xmm13
vaesenclast %xmm3,%xmm14,%xmm14
vmovups %xmm9,0(%rsi)
vmovups %xmm10,16(%rsi)
vmovups %xmm11,32(%rsi)
vmovups %xmm12,48(%rsi)
vmovups %xmm13,64(%rsi)
vmovups %xmm14,80(%rsi)
leaq 96(%rsi),%rsi
ret
.p2align 5
L$handle_ctr32_2:
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpshufb %xmm0,%xmm14,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpshufb %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32
.globl _aesni_gcm_encrypt
.private_extern _aesni_gcm_encrypt
.p2align 5
_aesni_gcm_encrypt:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+2(%rip)
#endif
xorq %rax,%rax
cmpq $288,%rdx
jb L$gcm_enc_abort
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
leaq 128(%rcx),%rcx
vmovdqu (%r11),%xmm0
andq $-128,%rsp
movl 240-128(%rcx),%r10d
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$enc_no_key_aliasing
cmpq $768,%r15
jnc L$enc_no_key_aliasing
subq %r15,%rsp
L$enc_no_key_aliasing:
movq %rsi,%r14
leaq -192(%rsi,%rdx,1),%r15
shrq $4,%rdx
call _aesni_ctr32_6x
vpshufb %xmm0,%xmm9,%xmm8
vpshufb %xmm0,%xmm10,%xmm2
vmovdqu %xmm8,112(%rsp)
vpshufb %xmm0,%xmm11,%xmm4
vmovdqu %xmm2,96(%rsp)
vpshufb %xmm0,%xmm12,%xmm5
vmovdqu %xmm4,80(%rsp)
vpshufb %xmm0,%xmm13,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm14,%xmm7
vmovdqu %xmm6,48(%rsp)
call _aesni_ctr32_6x
movq 16(%rbp),%r12
leaq 32(%r9),%r9
vmovdqu (%r12),%xmm8
subq $12,%rdx
movq $192,%rax
vpshufb %xmm0,%xmm8,%xmm8
call _aesni_ctr32_ghash_6x
vmovdqu 32(%rsp),%xmm7
vmovdqu (%r11),%xmm0
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm7,%xmm7,%xmm1
vmovdqu 32-32(%r9),%xmm15
vmovups %xmm9,-96(%rsi)
vpshufb %xmm0,%xmm9,%xmm9
vpxor %xmm7,%xmm1,%xmm1
vmovups %xmm10,-80(%rsi)
vpshufb %xmm0,%xmm10,%xmm10
vmovups %xmm11,-64(%rsi)
vpshufb %xmm0,%xmm11,%xmm11
vmovups %xmm12,-48(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vmovups %xmm13,-32(%rsi)
vpshufb %xmm0,%xmm13,%xmm13
vmovups %xmm14,-16(%rsi)
vpshufb %xmm0,%xmm14,%xmm14
vmovdqu %xmm9,16(%rsp)
vmovdqu 48(%rsp),%xmm6
vmovdqu 16-32(%r9),%xmm0
vpunpckhqdq %xmm6,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5
vpxor %xmm6,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vmovdqu 64(%rsp),%xmm9
vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm9,%xmm9,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6
vpxor %xmm9,%xmm5,%xmm5
vpxor %xmm7,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vmovdqu 80(%rsp),%xmm1
vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm4,%xmm7,%xmm7
vpunpckhqdq %xmm1,%xmm1,%xmm4
vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpxor %xmm6,%xmm9,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 96(%rsp),%xmm2
vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm7,%xmm6,%xmm6
vpunpckhqdq %xmm2,%xmm2,%xmm7
vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpxor %xmm9,%xmm1,%xmm1
vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm5,%xmm4,%xmm4
vpxor 112(%rsp),%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5
vmovdqu 112-32(%r9),%xmm0
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7
vpxor %xmm4,%xmm7,%xmm4
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm1
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8
vpxor %xmm14,%xmm1,%xmm1
vpxor %xmm5,%xmm6,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9
vmovdqu 32-32(%r9),%xmm15
vpxor %xmm2,%xmm8,%xmm7
vpxor %xmm4,%xmm9,%xmm6
vmovdqu 16-32(%r9),%xmm0
vpxor %xmm5,%xmm7,%xmm9
vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4
vpxor %xmm9,%xmm6,%xmm6
vpunpckhqdq %xmm13,%xmm13,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14
vpxor %xmm13,%xmm2,%xmm2
vpslldq $8,%xmm6,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vpxor %xmm9,%xmm5,%xmm8
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm6,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm12,%xmm12,%xmm9
vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13
vpxor %xmm12,%xmm9,%xmm9
vpxor %xmm14,%xmm13,%xmm13
vpalignr $8,%xmm8,%xmm8,%xmm14
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm11,%xmm11,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12
vpxor %xmm11,%xmm1,%xmm1
vpxor %xmm13,%xmm12,%xmm12
vxorps 16(%rsp),%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm9,%xmm9
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm10,%xmm10,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11
vpxor %xmm10,%xmm2,%xmm2
vpalignr $8,%xmm8,%xmm8,%xmm14
vpxor %xmm12,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm9,%xmm1,%xmm1
vxorps %xmm7,%xmm14,%xmm14
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4
vmovdqu 112-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm11,%xmm10,%xmm10
vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7
vpxor %xmm4,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6
vpxor %xmm10,%xmm7,%xmm7
vpxor %xmm2,%xmm6,%xmm6
vpxor %xmm5,%xmm7,%xmm4
vpxor %xmm4,%xmm6,%xmm6
vpslldq $8,%xmm6,%xmm1
vmovdqu 16(%r11),%xmm3
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm1,%xmm5,%xmm8
vpxor %xmm6,%xmm7,%xmm7
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm2,%xmm8,%xmm8
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm7,%xmm2,%xmm2
vpxor %xmm2,%xmm8,%xmm8
movq 16(%rbp),%r12
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_enc_abort:
ret
.section __DATA,__const
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
L$poly:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
L$one_msb:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
L$two_lsb:
.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
L$one_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
.text
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-neon-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
.globl gcm_init_neon
.def gcm_init_neon
.type 32
.endef
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
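// The sequence above doubles H in GF(2^128) and folds the carried-out top
// bit back in with the 0xc2...01 constant, yielding the "twisted H" the
// multiply routines below expect. A scalar sketch of the same step
// (illustrative only, not part of this file):
//
//   carry = H >> 127;
//   H <<= 1;
//   if (carry) H ^= ((u128)0xc2 << 120) | 1;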
.globl gcm_gmult_neon
.def gcm_gmult_neon
.type 32
.endef
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
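// gcm_gmult_neon performs a single Xi*H multiply: it seeds the byte count
// x3 = 16 and falls into the shared Lgmult_neon body below, whose trailing
// subs/bne then exits after this one block.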
.globl gcm_ghash_neon
.def gcm_ghash_neon
.type 32
.endef
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
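// As a concrete model, the value these pmull/mask steps assemble is the
// plain schoolbook carry-less multiply over bytes (illustrative C sketch;
// clmul8 is a hypothetical 8x8-bit GF(2) multiply, not in this file):
//
//   __uint128_t clmul64(uint64_t a, uint64_t b) {
//       __uint128_t r = 0;
//       for (int i = 0; i < 8; i++)
//           for (int j = 0; j < 8; j++)
//               r ^= (__uint128_t)clmul8(a >> 8*i, b >> 8*j) << 8*(i + j);
//       return r;
//   }
//
// Each pmull over rotated operands produces one i+j diagonal of that
// double sum in all eight lanes at once; roughly speaking, the $kN masks
// cancel the lanes where the rotation aliased terms that belong at a
// lower byte position, before the diagonal is shifted into place.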
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
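// The two shift cascades above fold the 256-bit product back to 128 bits
// modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1 (bit-reflected
// convention), mirroring reduction_avx from ghash-x86_64.pl as noted.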
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section .rodata
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// File: .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha20_poly1305_x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.section __DATA,__const
.p2align 6
chacha20_poly1305_constants:
L$chacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
L$rol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
L$rol16:
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
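// L$rol8 and L$rol16 are pshufb shuffle tables: permuting bytes this way
// rotates each 32-bit lane left by 8 or 16 bits, the two fixed rotations
// of the ChaCha20 quarter-round.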
L$avx2_init:
.long 0,0,0,0
L$sse_inc:
.long 1,0,0,0
L$avx2_inc:
.long 2,0,0,0,2,0,0,0
L$clamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
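// The first row of L$clamp is the RFC 8439 Poly1305 clamp applied to r
// (r &= 0x0ffffffc0ffffffc0ffffffc0fffffff); the all-ones second row is a
// pass-through mask, presumably so the s half survives a joint AND.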
.p2align 4
L$and_masks:
.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
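// Each row of L$and_masks keeps the first 1..16 bytes of a 16-byte block;
// the partial-block paths pick the row matching the bytes that remain.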
.text
.p2align 6
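// poly_hash_ad_internal: absorb the additional data (%rcx = pointer,
// %r8 = length) into the Poly1305 accumulator %r10:%r11:%r12, using the
// clamped key r stored at 0(%rbp). A 13-byte AD (the TLS record header
// case) takes the single-round fast path below; otherwise whole blocks
// are hashed in L$hash_ad_loop and a zero-padded tail in L$hash_ad_tail.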
poly_hash_ad_internal:
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
cmpq $13,%r8
jne L$hash_ad_loop
L$poly_fast_tls_ad:
movq (%rcx),%r10
movq 5(%rcx),%r11
shrq $24,%r11
movq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
ret
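// The mulq blocks repeated throughout this file each compute one Poly1305
// step, acc = (acc + block) * r mod 2^130 - 5, with the 130-bit
// accumulator in %r10:%r11:%r12 and r held at 0(%rbp)/8(%rbp). The lazy
// reduction they implement, roughly (illustrative C-style sketch only,
// with d the raw double-width product):
//
//   t   = d >> 130;                       // bits above 2^130
//   acc = (d mod 2^130) + 5*t;            // since 2^130 = 5 mod (2^130 - 5)
//
// 5*t is formed as t + 4*t, which is what the andq $3 / andq $-4 /
// shrdq $2 group and the final add/adc chains assemble.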
L$hash_ad_loop:
cmpq $16,%r8
jb L$hash_ad_tail
addq 0+0(%rcx),%r10
adcq 8+0(%rcx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rcx),%rcx
subq $16,%r8
jmp L$hash_ad_loop
L$hash_ad_tail:
cmpq $0,%r8
je L$hash_ad_done
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
addq %r8,%rcx
L$hash_ad_tail_loop:
shldq $8,%r13,%r14
shlq $8,%r13
movzbq -1(%rcx),%r15
xorq %r15,%r13
decq %rcx
decq %r8
jne L$hash_ad_tail_loop
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$hash_ad_done:
ret
.globl _chacha20_poly1305_open_sse41
.private_extern _chacha20_poly1305_open_sse41
.p2align 6
_chacha20_poly1305_open_sse41:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
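// Scratch frame (32-byte aligned at %rbp): bytes 0..31 hold the clamped
// Poly1305 key r|s, 32..47 the AD and ciphertext lengths for the final
// length block, and 48 onward the ChaCha20 key/counter state plus the
// spill slot used by the 4-block rounds.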
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
cmpq $128,%rbx
jbe L$open_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm7
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movq $10,%r10
L$open_sse_init_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
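// The raw .byte runs in this file are hand-encoded SSE instructions
// (66 0F 3A 0F = palignr, 66 0F 38 00 = pshufb), apparently emitted as
// bytes so that older assemblers accept the file; for example
// 102,15,58,15,228,N decodes to palignr $N,%xmm4,%xmm4.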
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jne L$open_sse_init_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
L$open_sse_main_loop:
cmpq $256,%rbx
jb L$open_sse_tail
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $4,%rcx
movq %rsi,%r8
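// Each pass of L$open_sse_main_loop handles 256 bytes: the rounds below
// run the twenty ChaCha20 rounds over four parallel blocks while folding
// 16-byte blocks of the incoming ciphertext (walked by %r8) into the
// Poly1305 state, so the hash keeps pace with keystream generation.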
L$open_sse_main_loop_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
leaq 16(%r8),%r8
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %rcx
jge L$open_sse_main_loop_rounds
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
cmpq $-6,%rcx
jg L$open_sse_main_loop_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor 0+80(%rbp),%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_sse_main_loop
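// Fewer than 256 bytes remain; dispatch on how many 64-byte blocks are
// left and finish with the 16-byte-at-a-time loop further below.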
L$open_sse_tail:
testq %rbx,%rbx
jz L$open_sse_finalize
cmpq $192,%rbx
ja L$open_sse_tail_256
cmpq $128,%rbx
ja L$open_sse_tail_192
cmpq $64,%rbx
ja L$open_sse_tail_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
cmpq $16,%rcx
jb L$open_sse_tail_64_rounds
L$open_sse_tail_64_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
L$open_sse_tail_64_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
cmpq $16,%rcx
jae L$open_sse_tail_64_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_64_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_128:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movq %rbx,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_128_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
cmpq %rcx,%r8
jb L$open_sse_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_128_rounds
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
subq $64,%rbx
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_192:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movq %rbx,%rcx
movq $160,%r8
cmpq $160,%rcx
cmovgq %r8,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_192_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
cmpq %rcx,%r8
jb L$open_sse_tail_192_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_192_rounds
cmpq $176,%rbx
jb L$open_sse_tail_192_finish
addq 0+160(%rsi),%r10
adcq 8+160(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
cmpq $192,%rbx
jb L$open_sse_tail_192_finish
addq 0+176(%rsi),%r10
adcq 8+176(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_finish:
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
subq $128,%rbx
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_256:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
xorq %r8,%r8
L$open_sse_tail_256_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
movdqa 0+80(%rbp),%xmm11
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
movdqa 0+80(%rbp),%xmm9
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
movdqa 0+80(%rbp),%xmm11
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
movdqa 0+80(%rbp),%xmm9
addq $16,%r8
cmpq $160,%r8
jb L$open_sse_tail_256_rounds_and_x1hash
movq %rbx,%rcx
andq $-16,%rcx
L$open_sse_tail_256_hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%r8
cmpq %rcx,%r8
jb L$open_sse_tail_256_hash
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqa 0+80(%rbp),%xmm12
subq $192,%rbx
leaq 192(%rsi),%rsi
leaq 192(%rdi),%rdi
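// Decrypt the remaining whole 16-byte words with the leftover keystream,
// rotating %xmm4/%xmm8/%xmm12 down into %xmm0 as each word is consumed.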
L$open_sse_tail_64_dec_loop:
cmpq $16,%rbx
jb L$open_sse_tail_16_init
subq $16,%rbx
movdqu (%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_16_init:
movdqa %xmm0,%xmm1
L$open_sse_tail_16:
testq %rbx,%rbx
jz L$open_sse_finalize
pxor %xmm3,%xmm3
leaq -1(%rsi,%rbx,1),%rsi
movq %rbx,%r8
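// Fewer than 16 bytes remain: gather them back to front into %xmm3 (zero
// padded above), keep the raw ciphertext in %r13:%r14 for the final
// Poly1305 block (the AEAD zero-pads partial blocks to 16 bytes), then
// decrypt and emit one byte at a time.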
L$open_sse_tail_16_compose:
pslldq $1,%xmm3
pinsrb $0,(%rsi),%xmm3
subq $1,%rsi
subq $1,%r8
jnz L$open_sse_tail_16_compose
.byte 102,73,15,126,221
pextrq $1,%xmm3,%r14
pxor %xmm1,%xmm3
L$open_sse_tail_16_extract:
pextrb $0,%xmm3,(%rdi)
psrldq $1,%xmm3
addq $1,%rdi
subq $1,%rbx
jne L$open_sse_tail_16_extract
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_finalize:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
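// Tag = (acc mod 2^130 - 5) + s: the subq/sbbq/cmovcq sequence computes
// acc - (2^130 - 5) and keeps it only when no borrow occurs, then the s
// half of the key at 16(%rbp) is added before the result is stored
// through the tag pointer restored from the stack below.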
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
L$open_sse_128:
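// Short input (at most 128 bytes): run three ChaCha20 blocks in one pass.
// Block 0 is clamped into the Poly1305 key; blocks 1 and 2 supply the
// keystream, rotated through the xmm registers as L$open_sse_128_xor_hash
// consumes them.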
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm13,%xmm15
movq $10,%r10
L$open_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$open_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm9
paddd %xmm11,%xmm10
paddd %xmm15,%xmm13
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm14
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
L$open_sse_128_xor_hash:
cmpq $16,%rbx
jb L$open_sse_tail_16
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm1
movdqu %xmm1,0(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
movdqa %xmm2,%xmm13
movdqa %xmm6,%xmm2
movdqa %xmm10,%xmm6
movdqa %xmm14,%xmm10
jmp L$open_sse_128_xor_hash
.globl _chacha20_poly1305_seal_sse41
.private_extern _chacha20_poly1305_seal_sse41
.p2align 6
_chacha20_poly1305_seal_sse41:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
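// %rbx becomes the total ciphertext length to authenticate: the plaintext
// length in %rdx plus an extra trailing length loaded from 56(%r9) (extra
// ciphertext supplied through the data block, if this build uses it). It
// and the AD length are stashed at 32(%rbp) for the final length block.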
movq %rdx,%rbx
cmpq $128,%rbx
jbe L$seal_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm14
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $10,%r10
L$seal_sse_init_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jnz L$seal_sse_init_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
pand L$clamp(%rip),%xmm3
movdqa %xmm3,0+0(%rbp)
movdqa %xmm7,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
cmpq $192,%rbx
ja L$seal_sse_main_init
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_init:
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 128(%rdi)
movdqu %xmm4,16 + 128(%rdi)
movdqu %xmm8,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
movq $2,%rcx
movq $8,%r8
cmpq $64,%rbx
jbe L$seal_sse_tail_64
cmpq $128,%rbx
jbe L$seal_sse_tail_128
cmpq $192,%rbx
jbe L$seal_sse_tail_192
L$seal_sse_main_loop:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
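// Main seal loop: as in the open path, the rounds below fuse the ChaCha20
// double-rounds over four parallel blocks with Poly1305 block updates,
// except the hash runs over the ciphertext already written at %rdi.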
.p2align 5
L$seal_sse_main_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
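/* Raw-byte palignr sequence: rotate the b/c/d rows of each state left by
   4/8/12 bytes to set up the diagonal round. */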
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
leaq 16(%rdi),%rdi
decq %r8
jge L$seal_sse_main_rounds
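/* After each batch of rounds, hash one extra ciphertext block; %rcx counts
   the remaining batches. */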
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_main_rounds
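/* Rounds complete: add back the input state to finalize all four blocks,
   then XOR 256 bytes of plaintext. xmm14 is parked at 80(%rbp) while its
   register is recycled as load/store scratch. */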
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm14,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm14
pxor %xmm3,%xmm14
movdqu %xmm14,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm14
pxor %xmm7,%xmm14
movdqu %xmm14,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm14
pxor %xmm11,%xmm14
movdqu %xmm14,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm14
pxor %xmm15,%xmm14
movdqu %xmm14,48 + 0(%rdi)
movdqa 0+80(%rbp),%xmm14
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
cmpq $256,%rbx
ja L$seal_sse_main_loop_xor
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_loop_xor:
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
subq $256,%rbx
movq $6,%rcx
movq $4,%r8
cmpq $192,%rbx
jg L$seal_sse_main_loop
movq %rbx,%rcx
testq %rbx,%rbx
je L$seal_sse_128_tail_hash
movq $6,%rcx
cmpq $128,%rbx
ja L$seal_sse_tail_192
cmpq $64,%rbx
ja L$seal_sse_tail_128
L$seal_sse_tail_64:
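/* At most 64 bytes remain: generate one more keystream block. */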
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
L$seal_sse_tail_64_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_64_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_64_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_64_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$seal_sse_128_tail_xor
L$seal_sse_tail_128:
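/* 65..128 bytes remain: generate two keystream blocks. */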
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
L$seal_sse_tail_128_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_128_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_128_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_128_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movq $64,%rcx
subq $64,%rbx
leaq 64(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_tail_192:
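/* 129..192 bytes remain: generate three keystream blocks. */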
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
L$seal_sse_tail_192_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_192_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_192_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_192_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
L$seal_sse_128_tail_hash:
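/* Hash whatever full 16-byte ciphertext blocks are still pending
   (%rcx bytes). */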
cmpq $16,%rcx
jb L$seal_sse_128_tail_xor
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
leaq 16(%rdi),%rdi
jmp L$seal_sse_128_tail_hash
L$seal_sse_128_tail_xor:
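/* Encrypt-and-hash 16 bytes at a time; when a keystream block is used up,
   rotate the next one into xmm0/xmm4/xmm8/xmm12. */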
cmpq $16,%rbx
jb L$seal_sse_tail_16
subq $16,%rbx
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,0(%rdi)
addq 0(%rdi),%r10
adcq 8(%rdi),%r11
adcq $1,%r12
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
movdqa %xmm1,%xmm12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
jmp L$seal_sse_128_tail_xor
L$seal_sse_tail_16:
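/* Fewer than 16 bytes left: gather them back-to-front into xmm15, encrypt,
   and emit the ciphertext byte by byte. */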
testq %rbx,%rbx
jz L$process_blocks_of_extra_in
movq %rbx,%r8
movq %rbx,%rcx
leaq -1(%rsi,%rbx,1),%rsi
pxor %xmm15,%xmm15
L$seal_sse_tail_16_compose:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
decq %rcx
jne L$seal_sse_tail_16_compose
pxor %xmm0,%xmm15
movq %rbx,%rcx
movdqu %xmm15,%xmm0
L$seal_sse_tail_16_extract:
pextrb $0,%xmm0,(%rdi)
psrldq $1,%xmm0
addq $1,%rdi
subq $1,%rcx
jnz L$seal_sse_tail_16_extract
movq 288 + 0 + 32(%rsp),%r9
movq 56(%r9),%r14
movq 48(%r9),%r13
testq %r14,%r14
jz L$process_partial_block
movq $16,%r15
subq %rbx,%r15
cmpq %r15,%r14
jge L$load_extra_in
movq %r14,%r15
L$load_extra_in:
leaq -1(%r13,%r15,1),%rsi
addq %r15,%r13
subq %r15,%r14
movq %r13,48(%r9)
movq %r14,56(%r9)
addq %r15,%r8
pxor %xmm11,%xmm11
L$load_extra_load_loop:
pslldq $1,%xmm11
pinsrb $0,(%rsi),%xmm11
leaq -1(%rsi),%rsi
subq $1,%r15
jnz L$load_extra_load_loop
movq %rbx,%r15
L$load_extra_shift_loop:
pslldq $1,%xmm11
subq $1,%r15
jnz L$load_extra_shift_loop
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
por %xmm11,%xmm15
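/* Raw bytes: movq %xmm15,%r13 -- low half of the final padded block into
   the Poly1305 accumulator inputs (high half via pextrq below). */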
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$process_blocks_of_extra_in:
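/* Hash the extra input passed through the parameter block saved on the
   stack (pointer at 48(%r9), length at 56(%r9)). */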
movq 288+32+0(%rsp),%r9
movq 48(%r9),%rsi
movq 56(%r9),%r8
movq %r8,%rcx
shrq $4,%r8
L$process_extra_hash_loop:
jz L$process_extra_in_trailer
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rsi),%rsi
subq $1,%r8
jmp L$process_extra_hash_loop
L$process_extra_in_trailer:
andq $15,%rcx
movq %rcx,%rbx
jz L$do_length_block
leaq -1(%rsi,%rcx,1),%rsi
L$process_extra_in_trailer_load:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
subq $1,%rcx
jnz L$process_extra_in_trailer_load
L$process_partial_block:
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$do_length_block:
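/* Final block: hash the AD/ciphertext lengths stored at 32(%rbp), reduce h
   fully mod 2^130 - 5 via the conditional subtraction below, then add the
   encrypted nonce s from 16(%rbp) to form the tag. */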
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
L$seal_sse_128:
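/* Short-input path: run three blocks entirely in registers; the
   xmm2/xmm6/xmm10/xmm14 block doubles as the Poly1305 key block. */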
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm14
movdqa %xmm14,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
movq $10,%r10
L$seal_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$seal_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm8
paddd %xmm11,%xmm9
paddd %xmm15,%xmm12
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm13
pand L$clamp(%rip),%xmm2
movdqa %xmm2,0+0(%rbp)
movdqa %xmm6,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
jmp L$seal_sse_128_tail_xor
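/* AVX2 open (decrypt) path: each ymm register carries two 64-byte ChaCha20
   states side by side. */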
.globl _chacha20_poly1305_open_avx2
.private_extern _chacha20_poly1305_open_avx2
.p2align 6
_chacha20_poly1305_open_avx2:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
vzeroupper
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe L$open_avx2_192
cmpq $320,%rbx
jbe L$open_avx2_320
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%r10
L$open_avx2_init_rounds:
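/* First ChaCha20 block pair: after the rounds, the first 32 bytes of
   keystream are clamped and stored at 0(%rbp) as the Poly1305 key. */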
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
decq %r10
jne L$open_avx2_init_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
L$open_avx2_init_hash:
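/* Hash the first 64 bytes of ciphertext before decrypting them below. */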
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%rcx
cmpq $64,%rcx
jne L$open_avx2_init_hash
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vmovdqu %ymm0,0(%rdi)
vmovdqu %ymm4,32(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
subq $64,%rbx
L$open_avx2_main_loop:
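/* Main AVX2 loop: 512 bytes per iteration across eight interleaved blocks
   (ymm0-ymm15), with the staged counters at 160..256(%rbp). */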
cmpq $512,%rbx
jb L$open_avx2_main_loop_done
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
L$open_avx2_main_loop_rounds:
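/* Double-round interleaved with three Poly1305 blocks per pass, using
   mulx for the multiply; %rcx tracks hashed ciphertext (48 bytes per pass,
   60*8 = 480 bytes before the loop exits). */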
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rsi,%rcx,1),%r10
adcq 8+16(%rsi,%rcx,1),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rsi,%rcx,1),%r10
adcq 8+32(%rsi,%rcx,1),%r11
adcq $1,%r12
leaq 48(%rcx),%rcx
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
cmpq $60*8,%rcx
jne L$open_avx2_main_loop_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+60*8(%rsi),%r10
adcq 8+60*8(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
addq 0+60*8+16(%rsi),%r10
adcq 8+60*8+16(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
leaq 512(%rdi),%rdi
subq $512,%rbx
jmp L$open_avx2_main_loop
L$open_avx2_main_loop_done:
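/* Fewer than 512 bytes remain: dispatch to a tail path sized by the
   leftover length. */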
testq %rbx,%rbx
vzeroupper
je L$open_sse_finalize
cmpq $384,%rbx
ja L$open_avx2_tail_512
cmpq $256,%rbx
ja L$open_avx2_tail_384
cmpq $128,%rbx
ja L$open_avx2_tail_256
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
andq $-16,%rcx
testq %rcx,%rcx
je L$open_avx2_tail_128_rounds
L$open_avx2_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_avx2_tail_128_rounds:
addq $16,%r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne L$open_avx2_tail_128_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_256:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $128,%rcx
shrq $4,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
L$open_avx2_tail_256_rounds_and_x1hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_256_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
incq %r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
cmpq %rcx,%r8
jb L$open_avx2_tail_256_rounds_and_x1hash
cmpq $10,%r8
jne L$open_avx2_tail_256_rounds
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
L$open_avx2_tail_256_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_tail_256_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_tail_256_hash
L$open_avx2_tail_256_done:
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
subq $128,%rbx
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_384:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $256,%rcx
shrq $4,%rcx
addq $6,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
L$open_avx2_tail_384_rounds_and_x2hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_384_rounds_and_x1hash:
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
incq %r8
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_384_rounds_and_x2hash
cmpq $10,%r8
jne L$open_avx2_tail_384_rounds_and_x1hash
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
L$open_avx2_384_tail_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_384_tail_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_384_tail_hash
L$open_avx2_384_tail_done:
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_512:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
movq %rsi,%r8
L$open_avx2_tail_512_rounds_and_x2hash:
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
L$open_avx2_tail_512_rounds_and_x1hash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+16(%r8),%r10
adcq 8+16(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%r8),%r8
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
incq %rcx
cmpq $4,%rcx
jl L$open_avx2_tail_512_rounds_and_x2hash
cmpq $10,%rcx
jne L$open_avx2_tail_512_rounds_and_x1hash
movq %rbx,%rcx
subq $384,%rcx
andq $-16,%rcx
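// Absorb the remaining full 16-byte blocks beyond the 384 already hashed in-round.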
L$open_avx2_tail_512_hash:
testq %rcx,%rcx
je L$open_avx2_tail_512_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
subq $16,%rcx
jmp L$open_avx2_tail_512_hash
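// Finalize all four states and XOR the first 384 bytes; the last <=128 bytes fall
// through to the shared tail below.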
L$open_avx2_tail_512_done:
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 384(%rsi),%rsi
leaq 384(%rdi),%rdi
subq $384,%rbx
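// Shared open tail: XOR 32 bytes at a time, rotating leftover keystream through
// ymm0 <- ymm4 <- ymm8 <- ymm12.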
L$open_avx2_tail_128_xor:
cmpq $32,%rbx
jb L$open_avx2_tail_32_xor
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
jmp L$open_avx2_tail_128_xor
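// Fewer than 32 bytes: use one 16-byte lane if possible, pull the upper lane down,
// and finish byte-wise in the SSE tail-16 code.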
L$open_avx2_tail_32_xor:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb L$open_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm1
vmovdqu %xmm1,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vperm2i128 $0x11,%ymm0,%ymm0,%ymm0
vmovdqa %xmm0,%xmm1
L$open_avx2_exit:
vzeroupper
jmp L$open_sse_tail_16
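// Open path for inputs of at most 192 bytes: two interleaved two-block states give
// enough keystream once 32 bytes are reserved for the Poly1305 key.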
L$open_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
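// Ten double-rounds; no hash interleaving here, since the Poly1305 key is not
// derived yet.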
L$open_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$open_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
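// Rearrange lanes; the first 32 bytes of keystream are clamped (L$clamp) and
// stored at 0(%rbp) as the Poly1305 key pair (r,s).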
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
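// Short open loop: hash the AD, then hash-and-decrypt 32 bytes per iteration,
// rotating keystream down through the register chain. (The movq %r8,%r8 below is
// a no-op: ad_len is already in %r8, apparently an artifact of macro expansion.)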
L$open_avx2_short:
movq %r8,%r8
call poly_hash_ad_internal
L$open_avx2_short_hash_and_xor_loop:
cmpq $32,%rbx
jb L$open_avx2_short_tail_32
subq $32,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rsi),%r10
adcq 8+16(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$open_avx2_short_hash_and_xor_loop
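// At most 32 bytes left: one more 16-byte hash-and-decrypt if available, then the
// SSE byte tail.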
L$open_avx2_short_tail_32:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb L$open_avx2_short_tail_32_exit
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm1
L$open_avx2_short_tail_32_exit:
vzeroupper
jmp L$open_sse_tail_16
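// Open path for 193..320 bytes: three interleaved two-block states.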
L$open_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
L$open_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$open_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$open_avx2_short
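// AVX2 sealing. Per the SysV convention of the C caller: %rdi out, %rsi in,
// %rdx in_len, %rcx ad, %r8 ad_len, %r9 key/counter data block; 56(%r9) appears
// to hold an extra-ciphertext length that is added into the byte count kept at
// 8+32(%rbp).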
.globl _chacha20_poly1305_seal_avx2
.private_extern _chacha20_poly1305_seal_avx2
.p2align 6
_chacha20_poly1305_seal_avx2:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
vzeroupper
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe L$seal_avx2_192
cmpq $320,%rbx
jbe L$seal_avx2_320
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm4,%ymm7
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vmovdqa %ymm8,%ymm11
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,%ymm15
vpaddd L$avx2_inc(%rip),%ymm15,%ymm14
vpaddd L$avx2_inc(%rip),%ymm14,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm15,0+256(%rbp)
movq $10,%r10
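// First 512 bytes of keystream (four two-block states); no hash interleave yet,
// since the Poly1305 key comes out of this keystream.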
L$seal_avx2_init_rounds:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %r10
jnz L$seal_avx2_init_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
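// 512 bytes of keystream are ready: clamp the first 32 into the Poly1305 key at
// 0(%rbp), hash the AD, then encrypt the first 320 bytes of plaintext; 128 bytes
// of keystream stay in ymm0/ymm4/ymm8/ymm12 for later.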
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vperm2i128 $0x02,%ymm3,%ymm7,%ymm15
vperm2i128 $0x13,%ymm3,%ymm7,%ymm3
vpand L$clamp(%rip),%ymm15,%ymm15
vmovdqa %ymm15,0+0(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
vpxor 0(%rsi),%ymm3,%ymm3
vpxor 32(%rsi),%ymm11,%ymm11
vmovdqu %ymm3,0(%rdi)
vmovdqu %ymm11,32(%rdi)
vperm2i128 $0x02,%ymm2,%ymm6,%ymm15
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+64(%rsi),%ymm15,%ymm15
vpxor 32+64(%rsi),%ymm2,%ymm2
vpxor 64+64(%rsi),%ymm6,%ymm6
vpxor 96+64(%rsi),%ymm10,%ymm10
vmovdqu %ymm15,0+64(%rdi)
vmovdqu %ymm2,32+64(%rdi)
vmovdqu %ymm6,64+64(%rdi)
vmovdqu %ymm10,96+64(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm15
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+192(%rsi),%ymm15,%ymm15
vpxor 32+192(%rsi),%ymm1,%ymm1
vpxor 64+192(%rsi),%ymm5,%ymm5
vpxor 96+192(%rsi),%ymm9,%ymm9
vmovdqu %ymm15,0+192(%rdi)
vmovdqu %ymm1,32+192(%rdi)
vmovdqu %ymm5,64+192(%rdi)
vmovdqu %ymm9,96+192(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm15
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm15,%ymm8
leaq 320(%rsi),%rsi
subq $320,%rbx
movq $320,%rcx
cmpq $128,%rbx
jbe L$seal_avx2_short_hash_remainder
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vpxor 64(%rsi),%ymm8,%ymm8
vpxor 96(%rsi),%ymm12,%ymm12
vmovdqu %ymm0,320(%rdi)
vmovdqu %ymm4,352(%rdi)
vmovdqu %ymm8,384(%rdi)
vmovdqu %ymm12,416(%rdi)
leaq 128(%rsi),%rsi
subq $128,%rbx
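// %rcx/%r8 seed the tail loops' hash schedule (8 passes absorbing 48 bytes plus 2
// absorbing 32 = the 448 bytes written so far); dispatch on the remaining length.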
movq $8,%rcx
movq $2,%r8
cmpq $128,%rbx
jbe L$seal_avx2_tail_128
cmpq $256,%rbx
jbe L$seal_avx2_tail_256
cmpq $384,%rbx
jbe L$seal_avx2_tail_384
cmpq $512,%rbx
jbe L$seal_avx2_tail_512
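// More than 512 bytes remain: peel the first main-loop pass. Only 448 bytes of
// ciphertext exist to hash so far (320 + 128 written above), so %rdi is backed up
// 16 bytes, %rcx starts at 9, and the round loop is entered mid-round.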
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
subq $16,%rdi
movq $9,%rcx
jmp L$seal_avx2_main_loop_rounds_entry
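// Steady state: each pass encrypts 512 bytes with four two-block states while
// Poly1305 walks %rdi over the 512 bytes of ciphertext written by the previous pass.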
.p2align 5
L$seal_avx2_main_loop:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%rcx
.p2align 5
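// One ChaCha20 double-round interleaved with three 16-byte Poly1305 absorptions
// (48 bytes of ciphertext per iteration).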
L$seal_avx2_main_loop_rounds:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$seal_avx2_main_loop_rounds_entry:
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rdi),%r10
adcq 8+32(%rdi),%r11
adcq $1,%r12
leaq 48(%rdi),%rdi
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %rcx
jne L$seal_avx2_main_loop_rounds
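// Rounds done (480 bytes hashed); finalize the states, absorb the last 32 bytes of
// the previous chunk, then XOR and store this chunk. %rdi has now advanced 512
// bytes, landing exactly where the new ciphertext is stored.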
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
subq $512,%rbx
cmpq $512,%rbx
jg L$seal_avx2_main_loop
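// Out of full chunks: absorb 32 more bytes of the final chunk (the tail loops pick
// up the remaining 480 via %rcx=10, %r8=0), then dispatch on the leftover length.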
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
movq $10,%rcx
xorq %r8,%r8
cmpq $384,%rbx
ja L$seal_avx2_tail_512
cmpq $256,%rbx
ja L$seal_avx2_tail_384
cmpq $128,%rbx
ja L$seal_avx2_tail_256
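// Seal tails mirror the open tails: one to four two-block states, with %rcx/%r8
// scheduling how many Poly1305 blocks each round pass absorbs. One state, for
// at most 128 bytes: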
L$seal_avx2_tail_128:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_128_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_128_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_128_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_128_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$seal_avx2_short_loop
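// Two states (<=256 bytes); this tail uses the legacy mulq Poly1305 form.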
L$seal_avx2_tail_256:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
L$seal_avx2_tail_256_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_256_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_256_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_256_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $128,%rcx
leaq 128(%rsi),%rsi
subq $128,%rbx
jmp L$seal_avx2_short_hash_remainder
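// Three states (<=384 bytes).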
L$seal_avx2_tail_384:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
L$seal_avx2_tail_384_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_384_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_384_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_384_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
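/*
 * Each ymm register holds the same 128-bit row of two interleaved
 * ChaCha20 blocks; the vperm2i128 $0x02/$0x13 pairs above regroup the
 * matching halves so that each register XORed into the input covers
 * 32 contiguous keystream bytes.
 */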
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $256,%rcx
leaq 256(%rsi),%rsi
subq $256,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_tail_512:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_512_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
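/*
 * The same Poly1305 step as the mulq-based blocks elsewhere in this
 * file, lowered here with BMI2 mulxq (a flag-preserving 64x64->128
 * multiply, convenient when interleaved with the adc chains); the
 * reduction tail (andq $3 / andq $-4 / shrdq $2) is identical.
 */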
leaq 16(%rdi),%rdi
L$seal_avx2_tail_512_rounds_and_2xhash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
addq %rax,%r15
adcq %rdx,%r9
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_512_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_512_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $384,%rcx
leaq 384(%rsi),%rsi
subq $384,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
L$seal_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
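/*
 * The group above is four ChaCha20 quarter-rounds in parallel, one per
 * column of the state held in ymm0/ymm4/ymm8/ymm12: the 16- and 8-bit
 * rotations are byte shuffles (vpshufb with L$rol16/L$rol8), while the
 * 12- and 7-bit rotations use a shift pair plus vpxor. The trailing
 * vpalignr instructions rotate this state's b/c/d rows so the second
 * half of the double round operates on the diagonals.
 */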
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$seal_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$seal_avx2_short
L$seal_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
L$seal_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$seal_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
L$seal_avx2_short:
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
L$seal_avx2_short_hash_remainder:
cmpq $16,%rcx
jb L$seal_avx2_short_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
addq $16,%rdi
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_short_loop:
cmpq $32,%rbx
jb L$seal_avx2_short_tail
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$seal_avx2_short_loop
L$seal_avx2_short_tail:
cmpq $16,%rbx
jb L$seal_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm0
L$seal_avx2_exit:
vzeroupper
jmp L$seal_sse_tail_16
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 10,875
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-neon-armv8-ios64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.text
.globl _gcm_init_neon
.private_extern _gcm_init_neon
.align 4
_gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
.globl _gcm_gmult_neon
.private_extern _gcm_gmult_neon
.align 4
_gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl _gcm_ghash_neon
.private_extern _gcm_ghash_neon
.align 4
_gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
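// A sketch of the decomposition used from here on, writing
// A = Ah*x^64 + Al and B = Bh*x^64 + Bl over GF(2):
//   A*B = Ah*Bh*x^128 + ((Ah^Al)*(Bh^Bl) ^ Ah*Bh ^ Al*Bl)*x^64 + Al*Bl
// so only three 64x64 carryless multiplies are needed: v5*v3 (low),
// v7*(v3^v4) (middle, with v7 = v5^v6 from the pre-processing above)
// and v6*v4 (high), each itself synthesized from 8-bit pmull products
// per the A1/B1... comments below.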
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
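// Concretely: the zip1/zip2 pairs below gather the low and high 64-bit
// halves of two products side by side, so each
//   lo ^= hi; hi &= kN; lo ^= hi
// fold handles two values per instruction, and the second zip pass
// moves the folded halves back into per-product registers.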
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
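// For reference: the shift counts below come from the GHASH modulus
// x^128 + x^7 + x^2 + x + 1 in the bit-reflected encoding
// (57/62/63 = 64-7/64-2/64-1 in the first phase, 1/2/7 in the second);
// together the two phases fold the high 128 bits of the 256-bit
// product back into the low half.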
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section __TEXT,__const
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
mktmansour/MKT-KSA-Geolocation-Security
| 17,785
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/bsaes-armv7-linux32.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@
@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
@ of Linaro.
@ ====================================================================
@ Bit-sliced AES for ARM NEON
@
@ February 2012.
@
@ This implementation is a direct adaptation of the bsaes-x86_64 module
@ for ARM NEON, except that this module is endian-neutral [in the sense
@ that it can be compiled for either endianness] courtesy of vld1.8's
@ neutrality. The initial version doesn't implement an interface to
@ OpenSSL, only low-level primitives and unsupported entry points, just
@ enough to collect performance results, which for the Cortex-A8 core are:
@
@ encrypt 19.5 cycles per byte processed with 128-bit key
@ decrypt 22.1 cycles per byte processed with 128-bit key
@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
@
@ Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
@ which is [much] worse than anticipated (for further details see
@ http://www.openssl.org/~appro/Snapdragon-S4.html).
@
@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
@ manages in 20.0 cycles].
@
@ When comparing to x86_64 results, keep in mind that the NEON unit is
@ [mostly] single-issue and thus can't [fully] benefit from
@ instruction-level parallelism. And when comparing to aes-armv4
@ results keep in mind key schedule conversion overhead (see
@ bsaes-x86_64.pl for further details)...
@
@ <appro@openssl.org>
@ April-August 2013
@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
#ifndef __KERNEL__
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
# define VFP_ABI_FRAME 0x40
#else
# define VFP_ABI_PUSH
# define VFP_ABI_POP
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#if defined(__thumb2__) && !defined(__APPLE__)
.thumb
#else
.code 32
# undef __thumb2__
#endif
.type _bsaes_const,%object
.align 6
_bsaes_const:
.LM0ISR: @ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
.LISR:
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
.LISRM0:
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
.LM0SR: @ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
.LSR:
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
.LSRM0:
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
.LM0:
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
.LREVM0SR:
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 6
.size _bsaes_const,.-_bsaes_const
.type _bsaes_encrypt8,%function
.align 4
_bsaes_encrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0SR
#else
sub r6,r6,#_bsaes_encrypt8-.LM0SR
#endif
vldmia r6!, {q8} @ .LM0SR
_bsaes_encrypt8_alt:
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
_bsaes_encrypt8_bitslice:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
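@ The shift/mask/xor ladder above is the classic swap-move bit-matrix
@ transpose: the 0x55/0x33/0x0f masks exchange bit groups of width 1,
@ 2 and 4 between register pairs, converting the eight 128-bit blocks
@ in q0-q7 into bit-sliced form (one bit position per q register).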
sub r5,r5,#1
b .Lenc_sbox
.align 4
.Lenc_loop:
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
.Lenc_sbox:
veor q2, q2, q1
veor q5, q5, q6
veor q3, q3, q0
veor q6, q6, q2
veor q5, q5, q0
veor q6, q6, q3
veor q3, q3, q7
veor q7, q7, q5
veor q3, q3, q4
veor q4, q4, q5
veor q2, q2, q7
veor q3, q3, q1
veor q1, q1, q5
veor q11, q7, q4
veor q10, q1, q2
veor q9, q5, q3
veor q13, q2, q4
vmov q8, q10
veor q12, q6, q0
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q3, q0
vand q15, q15, q12
vand q13, q13, q9
veor q9, q7, q1
veor q12, q5, q6
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q2, q3
veor q9, q9, q14
vand q13, q4, q0
vand q14, q1, q5
vorr q15, q7, q6
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
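@ What follows appears to be the bitsliced AES S-box: inversion in
@ GF(2^8) decomposed through a tower of GF(2^4)/GF(2^2) subfields,
@ with this block computing the shared GF(2^4) inverse on the
@ s0..s3 signals; the long veor chains before and after look like the
@ input/output linear layers of the circuit.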
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q6, q0
veor q8, q5, q3
veor q10, q15, q14
vand q10, q10, q6
veor q6, q6, q5
vand q11, q5, q15
vand q6, q6, q14
veor q5, q11, q10
veor q6, q6, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q3
vand q8, q8, q15
vand q3, q3, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q3
veor q12, q12, q11
veor q3, q3, q10
veor q6, q6, q12
veor q0, q0, q12
veor q5, q5, q8
veor q3, q3, q8
veor q12, q7, q4
veor q8, q1, q2
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q4
veor q12, q12, q8
veor q4, q4, q2
vand q8, q8, q15
vand q2, q2, q13
vand q12, q12, q14
vand q4, q4, q9
veor q8, q8, q12
veor q4, q4, q2
veor q12, q12, q11
veor q2, q2, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q7
veor q7, q7, q1
vand q11, q1, q15
vand q7, q7, q14
veor q1, q11, q10
veor q7, q7, q11
veor q7, q7, q12
veor q4, q4, q12
veor q1, q1, q8
veor q2, q2, q8
veor q7, q7, q0
veor q1, q1, q6
veor q6, q6, q0
veor q4, q4, q7
veor q0, q0, q1
veor q1, q1, q5
veor q5, q5, q2
veor q2, q2, q3
veor q3, q3, q5
veor q4, q4, q5
veor q6, q6, q3
subs r5,r5,#1
bcc .Lenc_done
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q4, q4, #12
veor q1, q1, q9
vext.8 q11, q6, q6, #12
veor q4, q4, q10
vext.8 q12, q3, q3, #12
veor q6, q6, q11
vext.8 q13, q7, q7, #12
veor q3, q3, q12
vext.8 q14, q2, q2, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q2, q2, q14
veor q9, q9, q0
veor q5, q5, q15
vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q3
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q3, q3, #8
veor q12, q12, q6
vext.8 q9, q7, q7, #8
veor q15, q15, q2
vext.8 q3, q6, q6, #8
veor q11, q11, q4
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q6, q2, q2, #8
veor q11, q11, q5
vext.8 q2, q4, q4, #8
veor q5, q9, q13
veor q4, q8, q12
veor q3, q3, q11
veor q7, q7, q15
veor q6, q6, q14
@ vmov q4, q8
veor q2, q2, q10
@ vmov q5, q9
vldmia r6, {q12} @ .LSR
ite eq @ Thumb2 thing, sanity check in ARM
addeq r6,r6,#0x10
bne .Lenc_loop
vldmia r6, {q12} @ .LSRM0
b .Lenc_loop
.align 4
.Lenc_done:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q2, #1
vshr.u64 q11, q3, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q3, q3, q11
vshr.u64 q10, q4, #1
vshr.u64 q11, q0, #1
veor q10, q10, q6
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q6, q6, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q4, q4, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q3, #2
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q3, q3, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q6
veor q11, q11, q4
vand q10, q10, q9
vand q11, q11, q9
veor q6, q6, q10
vshl.u64 q10, q10, #2
veor q4, q4, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q6, #4
vshr.u64 q11, q4, #4
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q4, q4, q8
veor q6, q6, q8
veor q3, q3, q8
veor q7, q7, q8
veor q2, q2, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
.size _bsaes_encrypt8,.-_bsaes_encrypt8
.type _bsaes_key_convert,%function
.align 4
_bsaes_key_convert:
adr r6,.
vld1.8 {q7}, [r4]! @ load round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0
#else
sub r6,r6,#_bsaes_key_convert-.LM0
#endif
vld1.8 {q15}, [r4]! @ load round 1 key
vmov.i8 q8, #0x01 @ bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
vldmia r6, {q14} @ .LM0
#ifdef __ARMEL__
vrev32.8 q7, q7
vrev32.8 q15, q15
#endif
sub r5,r5,#1
vstmia r12!, {q7} @ save round 0 key
b .Lkey_loop
.align 4
.Lkey_loop:
vtbl.8 d14,{q15},d28
vtbl.8 d15,{q15},d29
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.8 {q15}, [r4]! @ load next round key
vmvn q0, q0 @ "pnot"
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
#ifdef __ARMEL__
vrev32.8 q15, q15
#endif
subs r5,r5,#1
vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key
bne .Lkey_loop
vmov.i8 q7,#0x63 @ compose .L63
@ don't save last round key
bx lr
.size _bsaes_key_convert,.-_bsaes_key_convert
.globl bsaes_ctr32_encrypt_blocks
.hidden bsaes_ctr32_encrypt_blocks
.type bsaes_ctr32_encrypt_blocks,%function
.align 5
bsaes_ctr32_encrypt_blocks:
@ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this
@ out to retain a constant-time implementation.
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ ctr is 1st arg on the stack
sub sp, sp, #0x10 @ scratch space to carry over the ctr
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
add r12, #96 @ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp now points at the key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
vld1.8 {q0}, [r8] @ load counter
#ifdef __APPLE__
mov r8, #:lower16:(.LREVM0SR-.LM0)
add r8, r6, r8
#else
add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
#endif
vldmia sp, {q4} @ load round0 key
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
.align 2
add r12, r3, #248
vld1.8 {q0}, [r8] @ load counter
adrl r8, .LREVM0SR @ borrow r8
vldmia r12, {q4} @ load round0 key
sub sp, #0x10 @ place for adjusted round0 key
#endif
vmov.i32 q8,#1 @ compose 1<<96
veor q9,q9,q9
vrev32.8 q0,q0
vext.8 q8,q9,q8,#4
vrev32.8 q4,q4
vadd.u32 q9,q8,q8 @ compose 2<<96
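@ A note on the trick above: the counter block and the round-0 key were
@ byte-reversed within each 32-bit word (vrev32.8), so the big-endian
@ CTR word can be bumped with plain per-lane vadd.u32 additions; q8/q9
@ place the +1/+2 increments in the counter lane only.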
vstmia sp, {q4} @ save adjusted round0 key
b .Lctr_enc_loop
.align 4
.Lctr_enc_loop:
vadd.u32 q10, q8, q9 @ compose 3<<96
vadd.u32 q1, q0, q8 @ +1
vadd.u32 q2, q0, q9 @ +2
vadd.u32 q3, q0, q10 @ +3
vadd.u32 q4, q1, q10
vadd.u32 q5, q2, q10
vadd.u32 q6, q3, q10
vadd.u32 q7, q4, q10
vadd.u32 q10, q5, q10 @ next counter
@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
@ to flip byte order in 32-bit counter
vldmia sp, {q9} @ load round0 key
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x10 @ pass next round key
#else
add r4, r3, #264
#endif
vldmia r8, {q8} @ .LREVM0SR
mov r5, r10 @ pass rounds
vstmia r9, {q10} @ save next counter
#ifdef __APPLE__
mov r6, #:lower16:(.LREVM0SR-.LSR)
sub r6, r8, r6
#else
sub r6, r8, #.LREVM0SR-.LSR @ pass constants
#endif
bl _bsaes_encrypt8_alt
subs r2, r2, #8
blo .Lctr_enc_loop_done
vld1.8 {q8,q9}, [r0]! @ load input
vld1.8 {q10,q11}, [r0]!
veor q0, q8
veor q1, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q10
veor q6, q11
vld1.8 {q14,q15}, [r0]!
veor q3, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q7, q13
veor q2, q14
vst1.8 {q4}, [r1]!
veor q5, q15
vst1.8 {q6}, [r1]!
vmov.i32 q8, #1 @ compose 1<<96
vst1.8 {q3}, [r1]!
veor q9, q9, q9
vst1.8 {q7}, [r1]!
vext.8 q8, q9, q8, #4
vst1.8 {q2}, [r1]!
vadd.u32 q9,q8,q8 @ compose 2<<96
vst1.8 {q5}, [r1]!
vldmia r9, {q0} @ load counter
bne .Lctr_enc_loop
b .Lctr_enc_done
.align 4
.Lctr_enc_loop_done:
add r2, r2, #8
vld1.8 {q8}, [r0]! @ load input
veor q0, q8
vst1.8 {q0}, [r1]! @ write output
cmp r2, #2
blo .Lctr_enc_done
vld1.8 {q9}, [r0]!
veor q1, q9
vst1.8 {q1}, [r1]!
beq .Lctr_enc_done
vld1.8 {q10}, [r0]!
veor q4, q10
vst1.8 {q4}, [r1]!
cmp r2, #4
blo .Lctr_enc_done
vld1.8 {q11}, [r0]!
veor q6, q11
vst1.8 {q6}, [r1]!
beq .Lctr_enc_done
vld1.8 {q12}, [r0]!
veor q3, q12
vst1.8 {q3}, [r1]!
cmp r2, #6
blo .Lctr_enc_done
vld1.8 {q13}, [r0]!
veor q7, q13
vst1.8 {q7}, [r1]!
beq .Lctr_enc_done
vld1.8 {q14}, [r0]
veor q2, q14
vst1.8 {q2}, [r1]!
.Lctr_enc_done:
vmov.i32 q0, #0
vmov.i32 q1, #0
#ifndef BSAES_ASM_EXTENDED_KEY
.Lctr_enc_bzero: @ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lctr_enc_bzero
#else
vstmia sp, {q0,q1}
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
@ OpenSSL contains aes_nohw_* fallback code here. We patch this
@ out to retain a constant-time implementation.
.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
mktmansour/MKT-KSA-Geolocation-Security
| 70,675
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha256-x86_64-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,@function
.align 16
sha256_block_data_order_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
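/*
 * Frame layout, as read from the stores above: 0..63(%rsp) is the
 * 16-word message-schedule scratch area, the ctx/input/end pointers
 * live at 64/72/80(%rsp), and the caller's %rsp is kept at 88(%rsp)
 * for the epilogue.
 */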
.Lprologue:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop
.align 16
.Lloop:
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
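# Rounds 16..63: each round first expands one message-schedule word,
#   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
# with sigma0(x) = ror7(x)^ror18(x)^shr3(x) and
#      sigma1(x) = ror17(x)^ror19(x)^shr10(x),
# then performs the usual SHA-256 round using the constant at (%rbp).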
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
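# The K256 table below is 512 bytes (each 16-byte row stored twice); the
# byte-swap mask that follows it begins with .long 0x00010203, so byte 3
# at K256+512 is zero and serves as the loop terminator checked here.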
cmpb $0,3(%rbp)
jnz .Lrounds_16_xx
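# All 64 rounds done: reload the context pointer, fold the working
# variables back into the hash state, and loop while input remains.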
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
.section .rodata
.align 64
.type K256,@object
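# SHA-256 round constants.  Each 16-byte row is stored twice so the
# vector code paths can walk the table in 32-byte strides; the scalar
# path skips the duplicate copy via its leaq 20(%rbp) steps (4+4+4+20 =
# 32 bytes per four rounds).  The rows after K256+512 are pshufb
# byte-swap and lane-shuffle masks, not round constants.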
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
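# ASCII: "SHA256 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>"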
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.globl sha256_block_data_order_hw
.hidden sha256_block_data_order_hw
.type sha256_block_data_order_hw,@function
.align 64
sha256_block_data_order_hw:
.cfi_startproc
_CET_ENDBR
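# SHA-NI code path.  The .byte sequences below are hand-assembled SHA
# extension (and SSSE3) instructions, kept as raw bytes for assemblers
# that predate them:
#   15,56,203,xx    -> sha256rnds2
#   15,56,204,xx    -> sha256msg1
#   15,56,205,xx    -> sha256msg2
#   102,15,56,0,xx  -> pshufb   (byte-swap the input block)
#   102,15,58,15,.. -> palignr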
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa 512-128(%rcx),%xmm7
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp .Loop_shaext
.align 16
.Loop_shaext:
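# One 64-byte block per iteration; %rdx holds the remaining block count
# (decremented near the bottom).  %xmm1/%xmm2 carry the eight state
# words, permuted above into the lane order sha256rnds2 expects.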
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 102,15,56,0,231
movdqa %xmm2,%xmm10
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 102,15,56,0,239
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 102,15,56,0,247
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx
nop
.byte 15,56,203,202
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz .Loop_shaext
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.cfi_endproc
.size sha256_block_data_order_hw,.-sha256_block_data_order_hw
.globl sha256_block_data_order_ssse3
.hidden sha256_block_data_order_ssse3
.type sha256_block_data_order_ssse3,@function
.align 64
sha256_block_data_order_ssse3:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
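# %rdx = %rsi + num_blocks*64, i.e. one past the last input block.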
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
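# DWARF expression: CFA = *(%rsp + 88) + 8 -- 88(%rsp) holds the
# original %rsp saved above, from which the caller's frame is recovered.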
.Lprologue_ssse3:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop_ssse3
.align 16
.Lloop_ssse3:
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lssse3_00_47
.align 16
.Lssse3_00_47:
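# Each pass covers 16 rounds: the scalar round logic is interleaved with
# the SSSE3 computation of the next 16 schedule words, and K[t]+W[t] for
# the following pass is parked at 0..48(%rsp).  subq $-128 below adds
# 128 to %rbp using an 8-bit immediate; the .byte 102,15,58,15,...
# (palignr) and 102,15,56,0,... (pshufb) sequences are hand-encoded for
# assemblers without SSSE3 support.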
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
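# On the third pass %rbp = K256+384, so 131(%rbp) reads the zero byte at
# K256+515 (start of the byte-swap mask) and the loop falls through.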
cmpb $0,131(%rbp)
jne .Lssse3_00_47
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_ssse3
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_ssse3:
ret
.cfi_endproc
.size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3
.globl sha256_block_data_order_avx
.hidden sha256_block_data_order_avx
.type sha256_block_data_order_avx,@function
.align 64
sha256_block_data_order_avx:
.cfi_startproc
_CET_ENDBR
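# AVX variant: same round/schedule interleave as the SSSE3 path, but
# with non-destructive three-operand instructions.  %xmm8/%xmm9 (loaded
# below from K256+544/+576) are lane-shuffle masks used when packing the
# sigma1 results into the message schedule.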
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lavx_00_47
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
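// End of the 64 rounds for this block: fold the working variables
// (%eax..%edx and %r8d..%r11d, i.e. a..h) back into the hash state
// whose pointer was parked at 64(%rsp), then advance %rsi to the next
// 64-byte input block and loop until the end-of-input mark at
// 64+16(%rsp) is reached.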
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_avx
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha256_block_data_order_avx,.-sha256_block_data_order_avx
#endif
| mktmansour/MKT-KSA-Geolocation-Security | 4,266 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghashv8-armx-linux64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,%function
.align 4
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
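// Precompute the GHASH key table: derive the "twisted" H (H<<1 reduced
// mod the GHASH polynomial, per the inline notes below), then square
// and multiply with pmull/pmull2 to obtain H^2, H^3 and H^4.  Each
// power is stored alongside its Karatsuba-preprocessed hi^lo half so
// the multiply routines need three carry-less multiplies per 128-bit
// product instead of four.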
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,%function
.align 4
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
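// One GHASH multiplication: Xi = (Xi * twisted-H) mod P.  The 128x128
// carry-less product is assembled from three pmull/pmull2 results
// (Karatsuba), then reduced in two phases against the constant in v19
// (0xc2 in the top byte of each 64-bit lane, the reflected reduction
// polynomial).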
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_gmult_clmul,.-gcm_gmult_clmul
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
| mktmansour/MKT-KSA-Geolocation-Security | 4,229 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/x86-mont-elf.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl bn_mul_mont
.hidden bn_mul_mont
.type bn_mul_mont,@function
.align 16
bn_mul_mont:
.L_bn_mul_mont_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
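// Word-serial Montgomery multiplication using MMX pmuludq for the
// 32x32->64 partial products.  The prologue below carves a 64-byte
// aligned scratch area of roughly (num+2) words out of the stack for
// the running product t[], touching it page by page (.L000page_walk)
// so stack guard pages are grown safely.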
xorl %eax,%eax
movl 40(%esp),%edi
leal 20(%esp),%esi
leal 24(%esp),%edx
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%ebp
negl %edi
movl %ebp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%ebp
xorl %ebp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%ebp
andl $-64,%ebp
movl %esp,%eax
subl %ebp,%eax
andl $-4096,%eax
movl %esp,%edx
leal (%ebp,%eax,1),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
jmp .L001page_walk_done
.align 16
.L000page_walk:
leal -4096(%esp),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
.L001page_walk_done:
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%ebp
movl 16(%esi),%esi
movl (%esi),%esi
movl %eax,4(%esp)
movl %ebx,8(%esp)
movl %ecx,12(%esp)
movl %ebp,16(%esp)
movl %esi,20(%esp)
leal -3(%edi),%ebx
movl %edx,24(%esp)
movl $-1,%eax
movd %eax,%mm7
movl 8(%esp),%esi
movl 12(%esp),%edi
movl 16(%esp),%ebp
xorl %edx,%edx
xorl %ecx,%ecx
movd (%edi),%mm4
movd (%esi),%mm5
movd (%ebp),%mm3
pmuludq %mm4,%mm5
movq %mm5,%mm2
movq %mm5,%mm0
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
incl %ecx
.align 16
.L0021st:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
leal 1(%ecx),%ecx
cmpl %ebx,%ecx
jl .L0021st
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm2,%mm3
movq %mm3,32(%esp,%ebx,4)
incl %edx
.L003outer:
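// Outer loop, one pass per remaining word of b: accumulate a[]*b[j]
// into t[], form m = (t[0] + a[0]*b[j]) * n0 mod 2^32 in %mm5, add
// m*n[] so the low word cancels, and shift t[] down one word; %mm2 and
// %mm3 carry the two running 64-bit column sums.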
xorl %ecx,%ecx
movd (%edi,%edx,4),%mm4
movd (%esi),%mm5
movd 32(%esp),%mm6
movd (%ebp),%mm3
pmuludq %mm4,%mm5
paddq %mm6,%mm5
movq %mm5,%mm0
movq %mm5,%mm2
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 36(%esp),%mm6
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm6,%mm2
incl %ecx
decl %ebx
.L004inner:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
movd 36(%esp,%ecx,4),%mm6
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
paddq %mm6,%mm2
decl %ebx
leal 1(%ecx),%ecx
jnz .L004inner
movl %ecx,%ebx
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
movd 36(%esp,%ebx,4),%mm6
paddq %mm2,%mm3
paddq %mm6,%mm3
movq %mm3,32(%esp,%ebx,4)
leal 1(%edx),%edx
cmpl %ebx,%edx
jle .L003outer
emms
jmp .L005common_tail
.align 16
.L005common_tail:
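// Final reduction: subtract the modulus once and use the resulting
// borrow to build complementary 0/-1 masks in %eax/%edx, which
// .L007copy then uses to select between t[] and t[]-n[] without a
// data-dependent branch.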
movl 16(%esp),%ebp
movl 4(%esp),%edi
leal 32(%esp),%esi
movl (%esi),%eax
movl %ebx,%ecx
xorl %edx,%edx
.align 16
.L006sub:
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge .L006sub
sbbl $0,%eax
movl $-1,%edx
xorl %eax,%edx
jmp .L007copy
.align 16
.L007copy:
movl 32(%esp,%ebx,4),%esi
movl (%edi,%ebx,4),%ebp
movl %ecx,32(%esp,%ebx,4)
andl %eax,%esi
andl %edx,%ebp
orl %esi,%ebp
movl %ebp,(%edi,%ebx,4)
decl %ebx
jge .L007copy
movl 24(%esp),%esp
movl $1,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_mont,.-.L_bn_mul_mont_begin
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
| mktmansour/MKT-KSA-Geolocation-Security | 42,856 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha512-armv4-linux32.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@ ====================================================================
@ SHA512 block procedure for ARMv4. September 2007.
@
@ This code is ~4.5 (four and a half) times faster than code generated
@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
@ Xscale PXA250 core].
@
@ July 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 6% improvement on
@ Cortex A8 core and ~40 cycles per processed byte.
@
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 7%
@ improvement on Cortex A8 core and ~38 cycles per byte.
@
@ March 2011.
@
@ Add NEON implementation. On Cortex A8 it was measured to process
@ one byte in 23.3 cycles or ~60% faster than integer-only code.
@
@ August 2012.
@
@ Improve NEON performance by 12% on Snapdragon S4. In absolute
@ terms it's 22.6 cycles per byte, which is a disappointing result.
@ Technical writers asserted that the 3-way S4 pipeline can sustain
@ multiple NEON instructions per cycle, but dual NEON issue could
@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
@ for further details. On a side note, Cortex-A15 processes one byte in
@ 16 cycles.
@
@ Byte order [in]dependence. =========================================
@
@ Originally the caller was expected to maintain a specific *dword*
@ order in h[0-7], namely with the most significant dword at the
@ *lower* address, which was reflected in the two parameters below as
@ 0 and 4. Now the caller is expected to maintain native byte order
@ for whole 64-bit values.
#ifndef __KERNEL__
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
#endif
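@ Example: on a little-endian build WORD64(0x428a2f98,0xd728ae22,...)
@ emits .word 0xd728ae22,0x428a2f98,... so loading the words at
@ [addr+LO] and [addr+HI] yields the 64-bit constant
@ 0x428a2f98d728ae22 in native halves.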
.text
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code 32
#endif
.type K512,%object
.align 5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,%function
sha512_block_data_order_nohw:
add r2,r1,r2,lsl#7 @ len to point at the end of inp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
adr r14,K512
sub sp,sp,#9*8
ldr r7,[r0,#32+LO]
ldr r8,[r0,#32+HI]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
.Loop:
str r9, [sp,#48+0]
str r10, [sp,#48+4]
str r11, [sp,#56+0]
str r12, [sp,#56+4]
ldr r5,[r0,#0+LO]
ldr r6,[r0,#0+HI]
ldr r3,[r0,#8+LO]
ldr r4,[r0,#8+HI]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
str r3,[sp,#8+0]
str r4,[sp,#8+4]
str r9, [sp,#16+0]
str r10, [sp,#16+4]
str r11, [sp,#24+0]
str r12, [sp,#24+4]
ldr r3,[r0,#40+LO]
ldr r4,[r0,#40+HI]
str r3,[sp,#40+0]
str r4,[sp,#40+4]
.L00_15:
#if __ARM_ARCH<7
ldrb r3,[r1,#7]
ldrb r9, [r1,#6]
ldrb r10, [r1,#5]
ldrb r11, [r1,#4]
ldrb r4,[r1,#3]
ldrb r12, [r1,#2]
orr r3,r3,r9,lsl#8
ldrb r9, [r1,#1]
orr r3,r3,r10,lsl#16
ldrb r10, [r1],#8
orr r3,r3,r11,lsl#24
orr r4,r4,r12,lsl#8
orr r4,r4,r9,lsl#16
orr r4,r4,r10,lsl#24
#else
ldr r3,[r1,#4]
ldr r4,[r1],#8
#ifdef __ARMEL__
rev r3,r3
rev r4,r4
#endif
#endif
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
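@ (A 64-bit ROTR by n<32 splits into lo' = lo>>n ^ hi<<(32-n) and
@ hi' = hi>>n ^ lo<<(32-n); for n>32 the two halves swap roles, which
@ is why ROTR 41 shows up above as hi>>9 ^ lo<<23.)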
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
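@ End-of-loop detection: r9 holds the low byte of K[i].lo, and 148
@ (0x94) matches only K[15], so the conditional orr below sets bit 0
@ of the K pointer to flag the last of the first 16 rounds (the
@ 16..79 loop uses 23, the low byte of K[79].lo, the same way).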
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
tst r14,#1
beq .L00_15
ldr r9,[sp,#184+0]
ldr r10,[sp,#184+4]
bic r14,r14,#1
.L16_79:
@ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
@ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
@ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
mov r3,r9,lsr#1
ldr r11,[sp,#80+0]
mov r4,r10,lsr#1
ldr r12,[sp,#80+4]
eor r3,r3,r10,lsl#31
eor r4,r4,r9,lsl#31
eor r3,r3,r9,lsr#8
eor r4,r4,r10,lsr#8
eor r3,r3,r10,lsl#24
eor r4,r4,r9,lsl#24
eor r3,r3,r9,lsr#7
eor r4,r4,r10,lsr#7
eor r3,r3,r10,lsl#25
@ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
@ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
@ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
mov r9,r11,lsr#19
mov r10,r12,lsr#19
eor r9,r9,r12,lsl#13
eor r10,r10,r11,lsl#13
eor r9,r9,r12,lsr#29
eor r10,r10,r11,lsr#29
eor r9,r9,r11,lsl#3
eor r10,r10,r12,lsl#3
eor r9,r9,r11,lsr#6
eor r10,r10,r12,lsr#6
ldr r11,[sp,#120+0]
eor r9,r9,r12,lsl#26
ldr r12,[sp,#120+4]
adds r3,r3,r9
ldr r9,[sp,#192+0]
adc r4,r4,r10
ldr r10,[sp,#192+4]
adds r3,r3,r11
adc r4,r4,r12
adds r3,r3,r9
adc r4,r4,r10
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
#if __ARM_ARCH>=7
ittt eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq .L16_79
bic r14,r14,#1
ldr r3,[sp,#8+0]
ldr r4,[sp,#8+4]
ldr r9, [r0,#0+LO]
ldr r10, [r0,#0+HI]
ldr r11, [r0,#8+LO]
ldr r12, [r0,#8+HI]
adds r9,r5,r9
str r9, [r0,#0+LO]
adc r10,r6,r10
str r10, [r0,#0+HI]
adds r11,r3,r11
str r11, [r0,#8+LO]
adc r12,r4,r12
str r12, [r0,#8+HI]
ldr r5,[sp,#16+0]
ldr r6,[sp,#16+4]
ldr r3,[sp,#24+0]
ldr r4,[sp,#24+4]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
adds r9,r5,r9
str r9, [r0,#16+LO]
adc r10,r6,r10
str r10, [r0,#16+HI]
adds r11,r3,r11
str r11, [r0,#24+LO]
adc r12,r4,r12
str r12, [r0,#24+HI]
ldr r3,[sp,#40+0]
ldr r4,[sp,#40+4]
ldr r9, [r0,#32+LO]
ldr r10, [r0,#32+HI]
ldr r11, [r0,#40+LO]
ldr r12, [r0,#40+HI]
adds r7,r7,r9
str r7,[r0,#32+LO]
adc r8,r8,r10
str r8,[r0,#32+HI]
adds r11,r3,r11
str r11, [r0,#40+LO]
adc r12,r4,r12
str r12, [r0,#40+HI]
ldr r5,[sp,#48+0]
ldr r6,[sp,#48+4]
ldr r3,[sp,#56+0]
ldr r4,[sp,#56+4]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
adds r9,r5,r9
str r9, [r0,#48+LO]
adc r10,r6,r10
str r10, [r0,#48+HI]
adds r11,r3,r11
str r11, [r0,#56+LO]
adc r12,r4,r12
str r12, [r0,#56+HI]
add sp,sp,#640
sub r14,r14,#640
teq r1,r2
bne .Loop
add sp,sp,#8*9 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl sha512_block_data_order_neon
.hidden sha512_block_data_order_neon
.type sha512_block_data_order_neon,%function
.align 4
sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add r2,r1,r2,lsl#7 @ len to point at the end of inp
adr r3,K512
VFP_ABI_PUSH
vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
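@ d16-d23 hold the eight 64-bit state words a..h; the message schedule
@ sits in q0-q7 (filled block by block below) with q12-q15/d24-d30 as
@ scratch.  The first 16 rounds are fully unrolled, after which
@ .L16_79_neon runs four passes of 16 schedule-and-compress steps.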
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
vld1.64 {d0},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 0>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 0<16 && defined(__ARMEL__)
vrev64.8 d0,d0
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 1
#if 1<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 1>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 1<16 && defined(__ARMEL__)
vrev64.8 d1,d1
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 2
#if 2<16
vld1.64 {d2},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 2>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 2<16 && defined(__ARMEL__)
vrev64.8 d2,d2
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 3
#if 3<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 3>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 3<16 && defined(__ARMEL__)
vrev64.8 d3,d3
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 4
#if 4<16
vld1.64 {d4},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 4>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 4<16 && defined(__ARMEL__)
vrev64.8 d4,d4
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 5
#if 5<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 5>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 5<16 && defined(__ARMEL__)
vrev64.8 d5,d5
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 6
#if 6<16
vld1.64 {d6},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 6>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 6<16 && defined(__ARMEL__)
vrev64.8 d6,d6
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 7
#if 7<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 7>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 7<16 && defined(__ARMEL__)
vrev64.8 d7,d7
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 d24,d20,#14 @ 8
#if 8<16
vld1.64 {d8},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 8>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 8<16 && defined(__ARMEL__)
vrev64.8 d8,d8
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 9
#if 9<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 9>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 9<16 && defined(__ARMEL__)
vrev64.8 d9,d9
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 10
#if 10<16
vld1.64 {d10},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 10>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 10<16 && defined(__ARMEL__)
vrev64.8 d10,d10
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 11
#if 11<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 11>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 11<16 && defined(__ARMEL__)
vrev64.8 d11,d11
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 12
#if 12<16
vld1.64 {d12},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 12>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 12<16 && defined(__ARMEL__)
vrev64.8 d12,d12
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 13
#if 13<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 13>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 13<16 && defined(__ARMEL__)
vrev64.8 d13,d13
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 14
#if 14<16
vld1.64 {d14},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 14>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 14<16 && defined(__ARMEL__)
vrev64.8 d14,d14
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 15
#if 15<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 15>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 15<16 && defined(__ARMEL__)
vrev64.8 d15,d15
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
mov r12,#4
.L16_79_neon:
subs r12,#1
vshr.u64 q12,q7,#19
vshr.u64 q13,q7,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q7,#6
vsli.64 q12,q7,#45
vext.8 q14,q0,q1,#8 @ X[i+1]
vsli.64 q13,q7,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q0,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q4,q5,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q0,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q0,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
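@ NOTE: the operand-less "vrev64.8 ," lines in this and the following
@ rounds are dead text from the round-numbered generator; their guards
@ (e.g. "#if 16<16") are never true, so the preprocessor discards them
@ before the assembler ever sees them.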
#if 16<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 17
#if 17<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 17>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 17<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q0,#19
vshr.u64 q13,q0,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q0,#6
vsli.64 q12,q0,#45
vext.8 q14,q1,q2,#8 @ X[i+1]
vsli.64 q13,q0,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q1,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q5,q6,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q1,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q1,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 18<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 19
#if 19<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 19>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 19<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q1,#19
vshr.u64 q13,q1,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q1,#6
vsli.64 q12,q1,#45
vext.8 q14,q2,q3,#8 @ X[i+1]
vsli.64 q13,q1,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q2,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q6,q7,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q2,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q2,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 20<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 21
#if 21<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 21>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 21<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q2,#19
vshr.u64 q13,q2,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q2,#6
vsli.64 q12,q2,#45
vext.8 q14,q3,q4,#8 @ X[i+1]
vsli.64 q13,q2,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q3,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q7,q0,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q3,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q3,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 22<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 23
#if 23<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 23>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 23<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 q12,q3,#19
vshr.u64 q13,q3,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q3,#6
vsli.64 q12,q3,#45
vext.8 q14,q4,q5,#8 @ X[i+1]
vsli.64 q13,q3,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q4,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q0,q1,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q4,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q4,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 24<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 25
#if 25<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 25>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 25<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q4,#19
vshr.u64 q13,q4,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q4,#6
vsli.64 q12,q4,#45
vext.8 q14,q5,q6,#8 @ X[i+1]
vsli.64 q13,q4,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q5,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q1,q2,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q5,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q5,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 26<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 27
#if 27<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 27>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 27<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q5,#19
vshr.u64 q13,q5,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q5,#6
vsli.64 q12,q5,#45
vext.8 q14,q6,q7,#8 @ X[i+1]
vsli.64 q13,q5,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q6,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q2,q3,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q6,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q6,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 28<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 29
#if 29<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 29>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 29<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q6,#19
vshr.u64 q13,q6,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q6,#6
vsli.64 q12,q6,#45
vext.8 q14,q7,q0,#8 @ X[i+1]
vsli.64 q13,q6,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q7,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q3,q4,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q7,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q7,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 30<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 31
#if 31<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 31>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 31<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
bne .L16_79_neon
vadd.i64 d16,d30 @ h+=Maj from the past
vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
teq r1,r2
sub r3,#640 @ rewind K512
bne .Loop_neon
VFP_ABI_POP
bx lr @ .word 0xe12fff1e
.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
| mktmansour/MKT-KSA-Geolocation-Security | 24,471 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aes-gcm-avx2-x86_64-macosx.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.section __DATA,__const
.p2align 4
L$bswap_mask:
.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
L$gfpoly:
.quad 1, 0xc200000000000000
L$gfpoly_and_internal_carrybit:
.quad 1, 0xc200000000000001
.p2align 5
L$ctr_pattern:
.quad 0, 0
.quad 1, 0
L$inc_2blocks:
.quad 2, 0
.quad 2, 0
.text
.globl _gcm_init_vpclmulqdq_avx2
.private_extern _gcm_init_vpclmulqdq_avx2
.p2align 5
_gcm_init_vpclmulqdq_avx2:
_CET_ENDBR
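// Build the table of consecutive powers of the hash key H used by the
// AVX2 GHASH path, with packed hi^lo Karatsuba halves stored from
// offset 128 onward.  The raw .byte runs below appear to be
// hand-encoded vpclmulqdq forms, presumably emitted as literal machine
// code for assemblers that predate the wide-register encodings.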
vpshufd $0x4e,(%rsi),%xmm3
vpshufd $0xd3,%xmm3,%xmm0
vpsrad $31,%xmm0,%xmm0
vpaddq %xmm3,%xmm3,%xmm3
vpand L$gfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
vpxor %xmm0,%xmm3,%xmm3
vbroadcasti128 L$gfpoly(%rip),%ymm6
vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1
vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2
vpshufd $0x4e,%xmm0,%xmm0
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm5,%xmm5
vpxor %xmm0,%xmm5,%xmm5
vinserti128 $1,%xmm3,%ymm5,%ymm3
vinserti128 $1,%xmm5,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x65,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x65,0x44,0xe5,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
vmovdqu %ymm3,96(%rdi)
vmovdqu %ymm4,64(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128+32(%rdi)
.byte 0xc4,0xe3,0x5d,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x5d,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x5d,0x44,0xdd,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm3,%ymm3
vpxor %ymm0,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x65,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x65,0x44,0xe5,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,0(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128(%rdi)
vzeroupper
ret
.globl _gcm_ghash_vpclmulqdq_avx2_1
.private_extern _gcm_ghash_vpclmulqdq_avx2_1
.p2align 5
_gcm_ghash_vpclmulqdq_avx2_1:
_CET_ENDBR
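// One-block GHASH tail: byte-swap Xi, fold in a single 16-byte block,
// multiply by the key-table entry at 128-16(%rsi) with the usual
// Karatsuba split and two-phase vpclmulqdq reduction, then swap back
// and store Xi.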
vmovdqu L$bswap_mask(%rip),%xmm6
vmovdqu L$gfpoly(%rip),%xmm7
vmovdqu (%rdi),%xmm5
vpshufb %xmm6,%xmm5,%xmm5
L$ghash_lastblock:
vmovdqu (%rdx),%xmm0
vpshufb %xmm6,%xmm0,%xmm0
vpxor %xmm0,%xmm5,%xmm5
vmovdqu 128-16(%rsi),%xmm0
vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm2,%xmm2
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
vpshufd $0x4e,%xmm2,%xmm2
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm1,%xmm5,%xmm5
L$ghash_done:
vpshufb %xmm6,%xmm5,%xmm5
vmovdqu %xmm5,(%rdi)
vzeroupper
ret
.globl _aes_gcm_enc_update_vaes_avx2
.private_extern _aes_gcm_enc_update_vaes_avx2
.p2align 5
_aes_gcm_enc_update_vaes_avx2:
_CET_ENDBR
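// AVX2 AES-GCM encrypt path: AES-CTR keystream generation and GHASH
// accumulation run interleaved, two blocks per ymm register and 128
// bytes of input per main-loop iteration.  The round count read from
// 240(%rcx) steers the AES-128/192/256 dispatch below, and GHASH
// trails encryption by one chunk so that only finished ciphertext is
// hashed.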
pushq %r12
movq 16(%rsp),%r12
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+8(%rip)
#endif
vbroadcasti128 L$bswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11
cmpq $127,%rdx
jbe L$crypt_loop_4x_done__func1
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
leaq 16(%rcx),%rax
L$vaesenc_loop_first_4_vecs__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_first_4_vecs__func1
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
addq $-128,%rdx
cmpq $127,%rdx
jbe L$ghash_last_ciphertext_4x__func1
.p2align 4
L$crypt_loop_4x__func1:
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
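// Dispatch on the scaled round count in %r10d (derived from offset 240 of
// the key): the 192- and 256-bit paths only add extra vaesenc rounds (the
// .byte sequences encode vaesenc of the broadcast round key into
// %ymm12-%ymm15) before falling through to the common tail at
// L$aes128__func1.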
cmpl $24,%r10d
jl L$aes128__func1
je L$aes192__func1
vbroadcasti128 -208(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -192(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes192__func1:
vbroadcasti128 -176(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -160(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes128__func1:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vbroadcasti128 -144(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -128(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
addq $-128,%rdx
cmpq $127,%rdx
ja L$crypt_loop_4x__func1
L$ghash_last_ciphertext_4x__func1:
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi
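// Tail handling below: for a 1-127 byte remainder, %r8 is pointed at
// 128(%r9) minus the remaining length, which appears to select the
// hash-key powers matching the number of blocks left, so the unreduced
// GHASH sums in %ymm5-%ymm7 stay consistent before the final reduction.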
L$crypt_loop_4x_done__func1:
testq %rdx,%rdx
jz L$done__func1
leaq 128(%r9),%r8
subq %rdx,%r8
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb L$lessthan64bytes__func1
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_1__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_1__func1
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %ymm0,%ymm13,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
.byte 0xc4,0xe3,0x1d,0x44,0xea,0x00
.byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x11
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz L$reduce__func1
vpxor %xmm1,%xmm1,%xmm1
L$lessthan64bytes__func1:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_2__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_2__func1
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
cmpq $32,%rdx
jb L$xor_one_block__func1
je L$xor_two_blocks__func1
L$xor_three_blocks__func1:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %xmm0,%xmm13,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp L$ghash_mul_one_vec_unreduced__func1
L$xor_two_blocks__func1:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp L$ghash_mul_one_vec_unreduced__func1
L$xor_one_block__func1:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
L$ghash_mul_one_vec_unreduced__func1:
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11
vpxor %ymm4,%ymm7,%ymm7
L$reduce__func1:
vbroadcasti128 L$gfpoly(%rip),%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
.byte 0xc4,0xe3,0x6d,0x44,0xde,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
L$done__func1:
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper
popq %r12
ret
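// Decryption variant: structurally the same as the encrypt path above,
// except GHASH consumes the ciphertext read from the input (%rdi) in the
// same iteration it is decrypted, so no trailing GHASH pass over the last
// four vectors is needed.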
.globl _aes_gcm_dec_update_vaes_avx2
.private_extern _aes_gcm_dec_update_vaes_avx2
.p2align 5
_aes_gcm_dec_update_vaes_avx2:
_CET_ENDBR
pushq %r12
movq 16(%rsp),%r12
vbroadcasti128 L$bswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11
cmpq $127,%rdx
jbe L$crypt_loop_4x_done__func2
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
.p2align 4
L$crypt_loop_4x__func2:
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
cmpl $24,%r10d
jl L$aes128__func2
je L$aes192__func2
vbroadcasti128 -208(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -192(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes192__func2:
vbroadcasti128 -176(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -160(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes128__func2:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
vmovdqu 0(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vbroadcasti128 -144(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -128(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 32(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 64(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
subq $-128,%rsi
addq $-128,%rdx
cmpq $127,%rdx
ja L$crypt_loop_4x__func2
L$crypt_loop_4x_done__func2:
testq %rdx,%rdx
jz L$done__func2
leaq 128(%r9),%r8
subq %rdx,%r8
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb L$lessthan64bytes__func2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_1__func2:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_1__func2
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %ymm0,%ymm3,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
.byte 0xc4,0xe3,0x1d,0x44,0xea,0x00
.byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x11
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz L$reduce__func2
vpxor %xmm1,%xmm1,%xmm1
L$lessthan64bytes__func2:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_2__func2:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_2__func2
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
cmpq $32,%rdx
jb L$xor_one_block__func2
je L$xor_two_blocks__func2
L$xor_three_blocks__func2:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %xmm0,%xmm3,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp L$ghash_mul_one_vec_unreduced__func2
L$xor_two_blocks__func2:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp L$ghash_mul_one_vec_unreduced__func2
L$xor_one_block__func2:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm2,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
L$ghash_mul_one_vec_unreduced__func2:
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11
vpxor %ymm4,%ymm7,%ymm7
L$reduce__func2:
vbroadcasti128 L$gfpoly(%rip),%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
.byte 0xc4,0xe3,0x6d,0x44,0xde,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
L$done__func2:
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper
popq %r12
ret
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 20,965
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesni-x86_64-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
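// _aesni_encrypt{2,3,4,6,8} run 2-8 AES blocks in parallel through the
// schedule at (%rcx), with %eax holding the round count. The .byte
// sequences are raw encodings kept for old-assembler compatibility:
// 102,15,56,220 (66 0F 38 DC) is aesenc and 102,15,56,221 (66 0F 38 DD)
// is aesenclast.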
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop2:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop2
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.cfi_endproc
.size _aesni_encrypt2,.-_aesni_encrypt2
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop3
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.cfi_endproc
.size _aesni_encrypt3,.-_aesni_encrypt3
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 0x0f,0x1f,0x00
addq $16,%rax
.Lenc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop4
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.cfi_endproc
.size _aesni_encrypt4,.-_aesni_encrypt4
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop6_enter
.align 16
.Lenc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.Lenc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.cfi_endproc
.size _aesni_encrypt6,.-_aesni_encrypt6
.type _aesni_encrypt8,@function
.align 16
_aesni_encrypt8:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,209
pxor %xmm0,%xmm7
pxor %xmm0,%xmm8
.byte 102,15,56,220,217
pxor %xmm0,%xmm9
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop8_inner
.align 16
.Lenc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.Lenc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.Lenc_loop8_enter:
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop8
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
.byte 102,68,15,56,221,192
.byte 102,68,15,56,221,200
ret
.cfi_endproc
.size _aesni_encrypt8,.-_aesni_encrypt8
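// AES-CTR bulk encryption: a single block is handled inline; otherwise
// eight counter blocks are staged on the stack with their counter words
// byte-swapped and pre-XORed with the first round key, then pushed through
// the 8-wide pipeline above. Remainders of fewer than eight blocks fall
// back to the smaller loops or a partial 8-wide pass via .Lenc_loop8_enter.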
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit(%rip)
#endif
cmpq $1,%rdx
jne .Lctr32_bulk
movups (%r8),%xmm2
movups (%rdi),%xmm3
movl 240(%rcx),%edx
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
.Loop_enc1_1:
.byte 102,15,56,220,209
decl %edx
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
jnz .Loop_enc1_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorps %xmm3,%xmm2
pxor %xmm3,%xmm3
movups %xmm2,(%rsi)
xorps %xmm2,%xmm2
jmp .Lctr32_epilogue
.align 16
.Lctr32_bulk:
leaq (%rsp),%r11
.cfi_def_cfa_register %r11
pushq %rbp
.cfi_offset %rbp,-16
subq $128,%rsp
andq $-16,%rsp
movdqu (%r8),%xmm2
movdqu (%rcx),%xmm0
movl 12(%r8),%r8d
pxor %xmm0,%xmm2
movl 12(%rcx),%ebp
movdqa %xmm2,0(%rsp)
bswapl %r8d
movdqa %xmm2,%xmm3
movdqa %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm2,64(%rsp)
movdqa %xmm2,80(%rsp)
movdqa %xmm2,96(%rsp)
movq %rdx,%r10
movdqa %xmm2,112(%rsp)
leaq 1(%r8),%rax
leaq 2(%r8),%rdx
bswapl %eax
bswapl %edx
xorl %ebp,%eax
xorl %ebp,%edx
.byte 102,15,58,34,216,3
leaq 3(%r8),%rax
movdqa %xmm3,16(%rsp)
.byte 102,15,58,34,226,3
bswapl %eax
movq %r10,%rdx
leaq 4(%r8),%r10
movdqa %xmm4,32(%rsp)
xorl %ebp,%eax
bswapl %r10d
.byte 102,15,58,34,232,3
xorl %ebp,%r10d
movdqa %xmm5,48(%rsp)
leaq 5(%r8),%r9
movl %r10d,64+12(%rsp)
bswapl %r9d
leaq 6(%r8),%r10
movl 240(%rcx),%eax
xorl %ebp,%r9d
bswapl %r10d
movl %r9d,80+12(%rsp)
xorl %ebp,%r10d
leaq 7(%r8),%r9
movl %r10d,96+12(%rsp)
bswapl %r9d
xorl %ebp,%r9d
movl %r9d,112+12(%rsp)
movups 16(%rcx),%xmm1
movdqa 64(%rsp),%xmm6
movdqa 80(%rsp),%xmm7
cmpq $8,%rdx
jb .Lctr32_tail
leaq 128(%rcx),%rcx
subq $8,%rdx
jmp .Lctr32_loop8
.align 32
.Lctr32_loop8:
addl $8,%r8d
movdqa 96(%rsp),%xmm8
.byte 102,15,56,220,209
movl %r8d,%r9d
movdqa 112(%rsp),%xmm9
.byte 102,15,56,220,217
bswapl %r9d
movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
xorl %ebp,%r9d
nop
.byte 102,15,56,220,233
movl %r9d,0+12(%rsp)
leaq 1(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 48-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,16+12(%rsp)
leaq 2(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 64-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,32+12(%rsp)
leaq 3(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 80-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,48+12(%rsp)
leaq 4(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 96-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,64+12(%rsp)
leaq 5(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 112-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,80+12(%rsp)
leaq 6(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 128-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,96+12(%rsp)
leaq 7(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 144-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
xorl %ebp,%r9d
movdqu 0(%rdi),%xmm10
.byte 102,15,56,220,232
movl %r9d,112+12(%rsp)
cmpl $11,%eax
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 160-128(%rcx),%xmm0
jb .Lctr32_enc_done
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 176-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 192-128(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 208-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 224-128(%rcx),%xmm0
jmp .Lctr32_enc_done
.align 16
.Lctr32_enc_done:
movdqu 16(%rdi),%xmm11
pxor %xmm0,%xmm10
movdqu 32(%rdi),%xmm12
pxor %xmm0,%xmm11
movdqu 48(%rdi),%xmm13
pxor %xmm0,%xmm12
movdqu 64(%rdi),%xmm14
pxor %xmm0,%xmm13
movdqu 80(%rdi),%xmm15
pxor %xmm0,%xmm14
prefetcht0 448(%rdi)
prefetcht0 512(%rdi)
pxor %xmm0,%xmm15
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movdqu 96(%rdi),%xmm1
leaq 128(%rdi),%rdi
.byte 102,65,15,56,221,210
pxor %xmm0,%xmm1
movdqu 112-128(%rdi),%xmm10
.byte 102,65,15,56,221,219
pxor %xmm0,%xmm10
movdqa 0(%rsp),%xmm11
.byte 102,65,15,56,221,228
.byte 102,65,15,56,221,237
movdqa 16(%rsp),%xmm12
movdqa 32(%rsp),%xmm13
.byte 102,65,15,56,221,246
.byte 102,65,15,56,221,255
movdqa 48(%rsp),%xmm14
movdqa 64(%rsp),%xmm15
.byte 102,68,15,56,221,193
movdqa 80(%rsp),%xmm0
movups 16-128(%rcx),%xmm1
.byte 102,69,15,56,221,202
movups %xmm2,(%rsi)
movdqa %xmm11,%xmm2
movups %xmm3,16(%rsi)
movdqa %xmm12,%xmm3
movups %xmm4,32(%rsi)
movdqa %xmm13,%xmm4
movups %xmm5,48(%rsi)
movdqa %xmm14,%xmm5
movups %xmm6,64(%rsi)
movdqa %xmm15,%xmm6
movups %xmm7,80(%rsi)
movdqa %xmm0,%xmm7
movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi
subq $8,%rdx
jnc .Lctr32_loop8
addq $8,%rdx
jz .Lctr32_done
leaq -128(%rcx),%rcx
.Lctr32_tail:
leaq 16(%rcx),%rcx
cmpq $4,%rdx
jb .Lctr32_loop3
je .Lctr32_loop4
shll $4,%eax
movdqa 96(%rsp),%xmm8
pxor %xmm9,%xmm9
movups 16(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
leaq 32-16(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,225
addq $16,%rax
movups (%rdi),%xmm10
.byte 102,15,56,220,233
.byte 102,15,56,220,241
movups 16(%rdi),%xmm11
movups 32(%rdi),%xmm12
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
call .Lenc_loop8_enter
movdqu 48(%rdi),%xmm13
pxor %xmm10,%xmm2
movdqu 64(%rdi),%xmm10
pxor %xmm11,%xmm3
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
pxor %xmm10,%xmm6
movdqu %xmm5,48(%rsi)
movdqu %xmm6,64(%rsi)
cmpq $6,%rdx
jb .Lctr32_done
movups 80(%rdi),%xmm11
xorps %xmm11,%xmm7
movups %xmm7,80(%rsi)
je .Lctr32_done
movups 96(%rdi),%xmm12
xorps %xmm12,%xmm8
movups %xmm8,96(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop4:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx),%xmm1
jnz .Lctr32_loop4
.byte 102,15,56,221,209
.byte 102,15,56,221,217
movups (%rdi),%xmm10
movups 16(%rdi),%xmm11
.byte 102,15,56,221,225
.byte 102,15,56,221,233
movups 32(%rdi),%xmm12
movups 48(%rdi),%xmm13
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm4,32(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm5,48(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop3:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx),%xmm1
jnz .Lctr32_loop3
.byte 102,15,56,221,209
.byte 102,15,56,221,217
.byte 102,15,56,221,225
movups (%rdi),%xmm10
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
cmpq $2,%rdx
jb .Lctr32_done
movups 16(%rdi),%xmm11
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
je .Lctr32_done
movups 32(%rdi),%xmm12
xorps %xmm12,%xmm4
movups %xmm4,32(%rsi)
.Lctr32_done:
xorps %xmm0,%xmm0
xorl %ebp,%ebp
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movaps %xmm0,0(%rsp)
pxor %xmm8,%xmm8
movaps %xmm0,16(%rsp)
pxor %xmm9,%xmm9
movaps %xmm0,32(%rsp)
pxor %xmm10,%xmm10
movaps %xmm0,48(%rsp)
pxor %xmm11,%xmm11
movaps %xmm0,64(%rsp)
pxor %xmm12,%xmm12
movaps %xmm0,80(%rsp)
pxor %xmm13,%xmm13
movaps %xmm0,96(%rsp)
pxor %xmm14,%xmm14
movaps %xmm0,112(%rsp)
pxor %xmm15,%xmm15
movq -8(%r11),%rbp
.cfi_restore %rbp
leaq (%r11),%rsp
.cfi_def_cfa_register %rsp
.Lctr32_epilogue:
ret
.cfi_endproc
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
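// Key expansion via aeskeygenassist (.byte 102,15,58,223 is 66 0F 3A DF).
// Only 128- and 256-bit keys are accepted; anything else returns -2 via
// .Lbad_keybits. The rounds word stored at offset 240 of the schedule is
// 9 (128-bit) or 13 (256-bit), matching what the encrypt paths read back.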
.globl aes_hw_set_encrypt_key_base
.hidden aes_hw_set_encrypt_key_base
.type aes_hw_set_encrypt_key_base,@function
.align 16
aes_hw_set_encrypt_key_base:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds
cmpl $128,%esi
jne .Lbad_keybits
.L10rounds:
movl $9,%esi
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call .Lkey_expansion_128_cold
.byte 102,15,58,223,200,2
call .Lkey_expansion_128
.byte 102,15,58,223,200,4
call .Lkey_expansion_128
.byte 102,15,58,223,200,8
call .Lkey_expansion_128
.byte 102,15,58,223,200,16
call .Lkey_expansion_128
.byte 102,15,58,223,200,32
call .Lkey_expansion_128
.byte 102,15,58,223,200,64
call .Lkey_expansion_128
.byte 102,15,58,223,200,128
call .Lkey_expansion_128
.byte 102,15,58,223,200,27
call .Lkey_expansion_128
.byte 102,15,58,223,200,54
call .Lkey_expansion_128
movups %xmm0,(%rax)
movl %esi,80(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret
.align 16
.L14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
call .Lkey_expansion_256a_cold
.byte 102,15,58,223,200,1
call .Lkey_expansion_256b
.byte 102,15,58,223,202,2
call .Lkey_expansion_256a
.byte 102,15,58,223,200,2
call .Lkey_expansion_256b
.byte 102,15,58,223,202,4
call .Lkey_expansion_256a
.byte 102,15,58,223,200,4
call .Lkey_expansion_256b
.byte 102,15,58,223,202,8
call .Lkey_expansion_256a
.byte 102,15,58,223,200,8
call .Lkey_expansion_256b
.byte 102,15,58,223,202,16
call .Lkey_expansion_256a
.byte 102,15,58,223,200,16
call .Lkey_expansion_256b
.byte 102,15,58,223,202,32
call .Lkey_expansion_256a
.byte 102,15,58,223,200,32
call .Lkey_expansion_256b
.byte 102,15,58,223,202,64
call .Lkey_expansion_256a
movups %xmm0,(%rax)
movl %esi,16(%rax)
xorq %rax,%rax
jmp .Lenc_key_ret
.align 16
.Lbad_keybits:
movq $-2,%rax
.Lenc_key_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.align 16
.Lkey_expansion_128:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256a:
.cfi_startproc
movups %xmm2,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256b:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_base,.-aes_hw_set_encrypt_key_base
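// Alternate expansion that appears to avoid aeskeygenassist: the S-box
// step is done with pshufb (.byte 102,15,56,0) against .Lkey_rotate plus
// aesenclast (.byte 102,15,56,221), with the round constant in %xmm4
// doubled via pslld each iteration. Accepted key sizes, stored rounds
// word, and return values match the base version above.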
.globl aes_hw_set_encrypt_key_alt
.hidden aes_hw_set_encrypt_key_alt
.type aes_hw_set_encrypt_key_alt,@function
.align 16
aes_hw_set_encrypt_key_alt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds_alt
cmpl $128,%esi
jne .Lbad_keybits_alt
movl $9,%esi
movdqa .Lkey_rotate(%rip),%xmm5
movl $8,%r10d
movdqa .Lkey_rcon1(%rip),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,(%rdx)
jmp .Loop_key128
.align 16
.Loop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leaq 16(%rax),%rax
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%rax)
movdqa %xmm0,%xmm2
decl %r10d
jnz .Loop_key128
movdqa .Lkey_rcon1b(%rip),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%rax)
movl %esi,96(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.L14rounds_alt:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movdqa .Lkey_rotate(%rip),%xmm5
movdqa .Lkey_rcon1(%rip),%xmm4
movl $7,%r10d
movdqu %xmm0,0(%rdx)
movdqa %xmm2,%xmm1
movdqu %xmm2,16(%rdx)
jmp .Loop_key256
.align 16
.Loop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
decl %r10d
jz .Ldone_key256
pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%rax)
leaq 32(%rax),%rax
movdqa %xmm2,%xmm1
jmp .Loop_key256
.Ldone_key256:
movl %esi,16(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.Lbad_keybits_alt:
movq $-2,%rax
.Lenc_key_ret_alt:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_alt,.-aes_hw_set_encrypt_key_alt
.section .rodata
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lincrement32:
.long 6,6,6,0
.Lincrement64:
.long 1,0,0,0
.Lincrement1:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Lkey_rotate:
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
.Lkey_rotate192:
.long 0x04070605,0x04070605,0x04070605,0x04070605
.Lkey_rcon1:
.long 1,1,1,1
.Lkey_rcon1b:
.long 0x1b,0x1b,0x1b,0x1b
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.text
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 11,047
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/vpaes-x86_64-macosx.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
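// Vector-permutation AES (per the credit string at the end of this file,
// Mike Hamburg's SSSE3 design): S-box lookups are replaced by pshufb
// permutations (.byte 102,15,56,0) over the L$k_* constant tables, which
// keeps the implementation free of secret-indexed memory accesses.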
.p2align 4
_vpaes_encrypt_core:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa L$k_ipt(%rip),%xmm2
pandn %xmm0,%xmm1
movdqu (%r9),%xmm5
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa L$k_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
addq $16,%r9
pxor %xmm2,%xmm0
leaq L$k_mc_backward(%rip),%r10
jmp L$enc_entry
.p2align 4
L$enc_loop:
movdqa %xmm13,%xmm4
movdqa %xmm12,%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
L$enc_entry:
movdqa %xmm9,%xmm1
movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,232
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm10,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz L$enc_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
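// Two-block variant of the core above: the same round structure applied to
// a second state carried in %xmm6 and the high xmm registers, used by the
// CTR path to process a pair of blocks per call.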
.p2align 4
_vpaes_encrypt_core_2x:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_ipt(%rip),%xmm2
movdqa %xmm2,%xmm8
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
movdqu (%r9),%xmm5
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,208
.byte 102,68,15,56,0,198
movdqa L$k_ipt+16(%rip),%xmm0
movdqa %xmm0,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,247
pxor %xmm5,%xmm2
pxor %xmm5,%xmm8
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
leaq L$k_mc_backward(%rip),%r10
jmp L$enc2x_entry
.p2align 4
L$enc2x_loop:
movdqa L$k_sb1(%rip),%xmm4
movdqa L$k_sb1+16(%rip),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
movdqa L$k_sb2(%rip),%xmm5
movdqa %xmm5,%xmm13
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
.byte 102,69,15,56,0,232
movdqa (%r11,%r10,1),%xmm4
movdqa L$k_sb2+16(%rip),%xmm2
movdqa %xmm2,%xmm8
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm0,%xmm3
movdqa %xmm6,%xmm11
pxor %xmm5,%xmm2
pxor %xmm13,%xmm8
.byte 102,15,56,0,193
.byte 102,15,56,0,241
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
.byte 102,15,56,0,220
.byte 102,68,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
pxor %xmm6,%xmm11
.byte 102,15,56,0,193
.byte 102,15,56,0,241
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
pxor %xmm11,%xmm6
L$enc2x_entry:
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_inv+16(%rip),%xmm5
movdqa %xmm5,%xmm13
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,232
.byte 102,68,15,56,0,238
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm1,%xmm0
pxor %xmm7,%xmm6
.byte 102,15,56,0,217
.byte 102,68,15,56,0,223
movdqa %xmm10,%xmm4
movdqa %xmm10,%xmm12
pxor %xmm5,%xmm3
pxor %xmm13,%xmm11
.byte 102,15,56,0,224
.byte 102,68,15,56,0,230
movdqa %xmm10,%xmm2
movdqa %xmm10,%xmm8
pxor %xmm5,%xmm4
pxor %xmm13,%xmm12
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm0,%xmm2
pxor %xmm6,%xmm8
.byte 102,15,56,0,220
.byte 102,69,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
pxor %xmm7,%xmm11
jnz L$enc2x_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
ret
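// Key-schedule driver: expands the raw key with repeated schedule rounds
// and mangling, dispatching on %esi (key bit count) to the 128- or 256-bit
// path; no 192-bit branch appears here.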
.p2align 4
_vpaes_schedule_core:
call _vpaes_preheat
movdqa L$k_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
movdqa %xmm0,%xmm3
leaq L$k_ipt(%rip),%r11
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
leaq L$k_sr(%rip),%r10
movdqu %xmm0,(%rdx)
L$schedule_go:
cmpl $192,%esi
ja L$schedule_256
L$schedule_128:
movl $10,%esi
L$oop_schedule_128:
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
jmp L$oop_schedule_128
.p2align 4
L$schedule_256:
movdqu 16(%rdi),%xmm0
call _vpaes_schedule_transform
movl $7,%esi
L$oop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round
movdqa %xmm5,%xmm7
jmp L$oop_schedule_256
.p2align 4
L$schedule_mangle_last:
leaq L$k_deskew(%rip),%r11
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,193
leaq L$k_opt(%rip),%r11
addq $32,%rdx
L$schedule_mangle_last_dec:
addq $-16,%rdx
pxor L$k_s63(%rip),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%rdx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.p2align 4
_vpaes_schedule_round:
pxor %xmm1,%xmm1
.byte 102,65,15,58,15,200,15
.byte 102,69,15,58,15,192,15
pxor %xmm1,%xmm7
pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1
_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor L$k_s63(%rip),%xmm7
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa %xmm13,%xmm4
.byte 102,15,56,0,226
movdqa %xmm12,%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.p2align 4
_vpaes_schedule_transform:
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa (%r11),%xmm2
.byte 102,15,56,0,208
movdqa 16(%r11),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.p2align 4
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa L$k_mc_forward(%rip),%xmm5
addq $16,%rdx
pxor L$k_s63(%rip),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
L$schedule_mangle_both:
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
addq $-16,%r8
andq $0x30,%r8
movdqu %xmm3,(%rdx)
ret
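// Computes the rounds word as bits/32 + 5 (9 for 128-bit, 13 for 256-bit
// keys), stores it at offset 240 of the schedule, then runs the schedule
// core.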
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.p2align 4
_vpaes_set_encrypt_key:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+5(%rip)
#endif
movl %esi,%eax
shrl $5,%eax
addl $5,%eax
movl %eax,240(%rdx)
movl $0,%ecx
movl $0x30,%r8d
call _vpaes_schedule_core
xorl %eax,%eax
ret
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.p2align 4
_vpaes_ctr32_encrypt_blocks:
_CET_ENDBR
xchgq %rcx,%rdx
testq %rcx,%rcx
jz L$ctr32_abort
movdqu (%r8),%xmm0
movdqa L$ctr_add_one(%rip),%xmm8
subq %rdi,%rsi
call _vpaes_preheat
movdqa %xmm0,%xmm6
pshufb L$rev_ctr(%rip),%xmm6
testq $1,%rcx
jz L$ctr32_prep_loop
movdqu (%rdi),%xmm7
call _vpaes_encrypt_core
pxor %xmm7,%xmm0
paddd %xmm8,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
subq $1,%rcx
leaq 16(%rdi),%rdi
jz L$ctr32_done
L$ctr32_prep_loop:
movdqa %xmm6,%xmm14
movdqa %xmm6,%xmm15
paddd %xmm8,%xmm15
L$ctr32_loop:
movdqa L$rev_ctr(%rip),%xmm1
movdqa %xmm14,%xmm0
movdqa %xmm15,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
call _vpaes_encrypt_core_2x
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa L$ctr_add_two(%rip),%xmm3
pxor %xmm1,%xmm0
pxor %xmm2,%xmm6
paddd %xmm3,%xmm14
paddd %xmm3,%xmm15
movdqu %xmm0,(%rsi,%rdi,1)
movdqu %xmm6,16(%rsi,%rdi,1)
subq $2,%rcx
leaq 32(%rdi),%rdi
jnz L$ctr32_loop
L$ctr32_done:
L$ctr32_abort:
ret
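// Loads the shared L$k_* constants into %xmm9-%xmm15 so the core routines
// above can use them without reloading per block.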
.p2align 4
_vpaes_preheat:
leaq L$k_s0F(%rip),%r10
movdqa -32(%r10),%xmm10
movdqa -16(%r10),%xmm11
movdqa 0(%r10),%xmm9
movdqa 48(%r10),%xmm13
movdqa 64(%r10),%xmm12
movdqa 80(%r10),%xmm15
movdqa 96(%r10),%xmm14
ret
.section __DATA,__const
.p2align 6
_vpaes_consts:
L$k_inv:
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
L$k_s0F:
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
L$k_ipt:
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
L$k_sb1:
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
L$k_sb2:
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
L$k_sbo:
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
L$k_mc_forward:
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
L$k_mc_backward:
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
L$k_sr:
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
L$k_rcon:
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
L$k_s63:
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
L$k_opt:
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
L$k_deskew:
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
L$rev_ctr:
.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
L$ctr_add_one:
.quad 0x0000000000000000, 0x0000000100000000
L$ctr_add_two:
.quad 0x0000000000000000, 0x0000000200000000
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.p2align 6
.text
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 78,605
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/p256-x86_64-asm-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.section .rodata
.align 64
.Lpoly:
.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
.LOne:
.long 1,1,1,1,1,1,1,1
.LTwo:
.long 2,2,2,2,2,2,2,2
.LThree:
.long 3,3,3,3,3,3,3,3
.LONE_mont:
.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
.Lord:
.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
.LordK:
.quad 0xccd1c8aaee00bc4f
.text
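// Negation modulo the P-256 prime: computes p - a with the borrow tracked
// in %r13, and the cmovz chain keeps the pre-addition value so that an
// input of zero maps to zero rather than to p.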
.globl ecp_nistz256_neg
.hidden ecp_nistz256_neg
.type ecp_nistz256_neg,@function
.align 32
ecp_nistz256_neg:
.cfi_startproc
_CET_ENDBR
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
.Lneg_body:
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %r13,%r13
subq 0(%rsi),%r8
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r8,%rax
sbbq 24(%rsi),%r11
leaq .Lpoly(%rip),%rsi
movq %r9,%rdx
sbbq $0,%r13
addq 0(%rsi),%r8
movq %r10,%rcx
adcq 8(%rsi),%r9
adcq 16(%rsi),%r10
movq %r11,%r12
adcq 24(%rsi),%r11
testq %r13,%r13
cmovzq %rax,%r8
cmovzq %rdx,%r9
movq %r8,0(%rdi)
cmovzq %rcx,%r10
movq %r9,8(%rdi)
cmovzq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq 0(%rsp),%r13
.cfi_restore %r13
movq 8(%rsp),%r12
.cfi_restore %r12
leaq 16(%rsp),%rsp
.cfi_adjust_cfa_offset -16
.Lneg_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_neg,.-ecp_nistz256_neg
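// Montgomery multiplication modulo the group order .Lord: schoolbook word
// products with mulq, one reduction step folded in after each limb using
// .LordK (which appears to be -n^{-1} mod 2^64), and a final conditional
// subtraction of the order selected by cmovc.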
.globl ecp_nistz256_ord_mul_mont_nohw
.hidden ecp_nistz256_ord_mul_mont_nohw
.type ecp_nistz256_ord_mul_mont_nohw,@function
.align 32
ecp_nistz256_ord_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_mul_body:
movq 0(%rdx),%rax
movq %rdx,%rbx
leaq .Lord(%rip),%r14
movq .LordK(%rip),%r15
movq %rax,%rcx
mulq 0(%rsi)
movq %rax,%r8
movq %rcx,%rax
movq %rdx,%r9
mulq 8(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq 16(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %r8,%r13
imulq %r15,%r8
movq %rdx,%r11
mulq 24(%rsi)
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq 0(%r14)
movq %r8,%rbp
addq %rax,%r13
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rcx
subq %r8,%r10
sbbq $0,%r8
mulq 8(%r14)
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %rbp,%rax
adcq %rdx,%r10
movq %rbp,%rdx
adcq $0,%r8
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 8(%rbx),%rax
sbbq %rdx,%rbp
addq %r8,%r11
adcq %rbp,%r12
adcq $0,%r13
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %r9,%rcx
imulq %r15,%r9
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r12
adcq $0,%rdx
xorq %r8,%r8
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
mulq 0(%r14)
movq %r9,%rbp
addq %rax,%rcx
movq %r9,%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%r9
mulq 8(%r14)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq %rdx,%r11
movq %rbp,%rdx
adcq $0,%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r12
movq 16(%rbx),%rax
sbbq %rdx,%rbp
addq %r9,%r12
adcq %rbp,%r13
adcq $0,%r8
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %r10,%rcx
imulq %r15,%r10
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r13
adcq $0,%rdx
xorq %r9,%r9
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
mulq 0(%r14)
movq %r10,%rbp
addq %rax,%rcx
movq %r10,%rax
adcq %rdx,%rcx
subq %r10,%r12
sbbq $0,%r10
mulq 8(%r14)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq %rdx,%r12
movq %rbp,%rdx
adcq $0,%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r13
movq 24(%rbx),%rax
sbbq %rdx,%rbp
addq %r10,%r13
adcq %rbp,%r8
adcq $0,%r9
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rcx,%rax
adcq $0,%rdx
movq %r11,%rcx
imulq %r15,%r11
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r8
adcq $0,%rdx
xorq %r10,%r10
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
mulq 0(%r14)
movq %r11,%rbp
addq %rax,%rcx
movq %r11,%rax
adcq %rdx,%rcx
subq %r11,%r13
sbbq $0,%r11
mulq 8(%r14)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq %rdx,%r13
movq %rbp,%rdx
adcq $0,%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
sbbq %rdx,%rbp
addq %r11,%r8
adcq %rbp,%r9
adcq $0,%r10
movq %r12,%rsi
subq 0(%r14),%r12
movq %r13,%r11
sbbq 8(%r14),%r13
movq %r8,%rcx
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rsi,%r12
cmovcq %r11,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_mul_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_mul_mont_nohw,.-ecp_nistz256_ord_mul_mont_nohw
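// Repeated Montgomery squaring: %rdx on entry is the iteration count
// (copied to %rbx and decremented by .Loop_ord_sqr), and the .byte movq
// encodings (102,72,15,110 / 102,72,15,126) stash the input limbs in
// %xmm1-%xmm3 across iterations.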
.globl ecp_nistz256_ord_sqr_mont_nohw
.hidden ecp_nistz256_ord_sqr_mont_nohw
.type ecp_nistz256_ord_sqr_mont_nohw,@function
.align 32
ecp_nistz256_ord_sqr_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_sqr_body:
movq 0(%rsi),%r8
movq 8(%rsi),%rax
movq 16(%rsi),%r14
movq 24(%rsi),%r15
leaq .Lord(%rip),%rsi
movq %rdx,%rbx
jmp .Loop_ord_sqr
.align 32
.Loop_ord_sqr:
movq %rax,%rbp
mulq %r8
movq %rax,%r9
.byte 102,72,15,110,205
movq %r14,%rax
movq %rdx,%r10
mulq %r8
addq %rax,%r10
movq %r15,%rax
.byte 102,73,15,110,214
adcq $0,%rdx
movq %rdx,%r11
mulq %r8
addq %rax,%r11
movq %r15,%rax
.byte 102,73,15,110,223
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
movq %rax,%r13
movq %r14,%rax
movq %rdx,%r14
mulq %rbp
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r15
mulq %rbp
addq %rax,%r12
adcq $0,%rdx
addq %r15,%r12
adcq %rdx,%r13
adcq $0,%r14
xorq %r15,%r15
movq %r8,%rax
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
.byte 102,72,15,126,200
movq %rdx,%rbp
mulq %rax
addq %rbp,%r9
adcq %rax,%r10
.byte 102,72,15,126,208
adcq $0,%rdx
movq %rdx,%rbp
mulq %rax
addq %rbp,%r11
adcq %rax,%r12
.byte 102,72,15,126,216
adcq $0,%rdx
movq %rdx,%rbp
movq %r8,%rcx
imulq 32(%rsi),%r8
mulq %rax
addq %rbp,%r13
adcq %rax,%r14
movq 0(%rsi),%rax
adcq %rdx,%r15
mulq %r8
movq %r8,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r8,%r10
sbbq $0,%rbp
mulq %r8
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %r8,%rax
adcq %rdx,%r10
movq %r8,%rdx
adcq $0,%rbp
movq %r9,%rcx
imulq 32(%rsi),%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 0(%rsi),%rax
sbbq %rdx,%r8
addq %rbp,%r11
adcq $0,%r8
mulq %r9
movq %r9,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%rbp
mulq %r9
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %r9,%rax
adcq %rdx,%r11
movq %r9,%rdx
adcq $0,%rbp
movq %r10,%rcx
imulq 32(%rsi),%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
movq 0(%rsi),%rax
sbbq %rdx,%r9
addq %rbp,%r8
adcq $0,%r9
mulq %r10
movq %r10,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r10,%r8
sbbq $0,%rbp
mulq %r10
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %r10,%rax
adcq %rdx,%r8
movq %r10,%rdx
adcq $0,%rbp
movq %r11,%rcx
imulq 32(%rsi),%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r9
movq 0(%rsi),%rax
sbbq %rdx,%r10
addq %rbp,%r9
adcq $0,%r10
mulq %r11
movq %r11,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r11,%r9
sbbq $0,%rbp
mulq %r11
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
movq %r11,%rdx
adcq $0,%rbp
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r10
sbbq %rdx,%r11
addq %rbp,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r12,%r8
adcq %r13,%r9
movq %r8,%r12
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%rax
adcq $0,%rdx
subq 0(%rsi),%r8
movq %r10,%r14
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r15
sbbq 24(%rsi),%r11
sbbq $0,%rdx
cmovcq %r12,%r8
cmovncq %r9,%rax
cmovncq %r10,%r14
cmovncq %r11,%r15
decq %rbx
jnz .Loop_ord_sqr
movq %r8,0(%rdi)
movq %rax,8(%rdi)
pxor %xmm1,%xmm1
movq %r14,16(%rdi)
pxor %xmm2,%xmm2
movq %r15,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_sqr_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_sqr_mont_nohw,.-ecp_nistz256_ord_sqr_mont_nohw
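/*
 * ecp_nistz256_ord_mul_mont_adx(res, a, b): res = a*b*2^-256 mod n,
 * built on BMI2/ADX mulx/adcx/adox so two independent carry chains
 * (CF and OF) can run in parallel; the caller is expected to have
 * checked for ADX support.  The a pointer is biased by -128 and every
 * load compensates with a +128 displacement, presumably an encoding or
 * scheduling tweak inherited from the perlasm source.
 */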
.globl ecp_nistz256_ord_mul_mont_adx
.hidden ecp_nistz256_ord_mul_mont_adx
.type ecp_nistz256_ord_mul_mont_adx,@function
.align 32
ecp_nistz256_ord_mul_mont_adx:
.cfi_startproc
.Lecp_nistz256_ord_mul_mont_adx:
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
leaq .Lord-128(%rip),%r14
movq .LordK(%rip),%r15
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
mulxq %r11,%rbp,%r11
addq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
mulxq %r15,%rdx,%rax
adcq %rbp,%r10
adcq %rcx,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24+128(%r14),%rcx,%rbp
movq 8(%rbx),%rdx
adcxq %rcx,%r11
adoxq %rbp,%r12
adcxq %r8,%r12
adoxq %r8,%r13
adcq $0,%r13
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%r14),%rcx,%rbp
movq 16(%rbx),%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r9,%r13
adoxq %r9,%r8
adcq $0,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%r14),%rcx,%rbp
movq 24(%rbx),%rdx
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r10,%r8
adoxq %r10,%r9
adcq $0,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r8
adoxq %rbp,%r9
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%r14),%rcx,%rbp
leaq 128(%r14),%r14
movq %r12,%rbx
adcxq %rcx,%r8
adoxq %rbp,%r9
movq %r13,%rdx
adcxq %r11,%r9
adoxq %r11,%r10
adcq $0,%r10
movq %r8,%rcx
subq 0(%r14),%r12
sbbq 8(%r14),%r13
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_mulx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_mul_mont_adx,.-ecp_nistz256_ord_mul_mont_adx
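/*
 * ecp_nistz256_ord_sqr_mont_adx(res, a, rep): ADX/BMI2 counterpart of
 * ecp_nistz256_ord_sqr_mont_nohw; rdx on entry is the repetition count,
 * kept in rbx for the .Loop_ord_sqrx loop.
 */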
.globl ecp_nistz256_ord_sqr_mont_adx
.hidden ecp_nistz256_ord_sqr_mont_adx
.type ecp_nistz256_ord_sqr_mont_adx,@function
.align 32
ecp_nistz256_ord_sqr_mont_adx:
.cfi_startproc
_CET_ENDBR
.Lecp_nistz256_ord_sqr_mont_adx:
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_sqrx_body:
movq %rdx,%rbx
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq .Lord(%rip),%rsi
jmp .Loop_ord_sqrx
.align 32
.Loop_ord_sqrx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
movq %rdx,%rax
.byte 102,73,15,110,206 /* movq %r14,%xmm1: park a[1] */
mulxq %r8,%rbp,%r12
movq %r14,%rdx
addq %rcx,%r10
.byte 102,73,15,110,215 /* movq %r15,%xmm2: park a[2] */
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq %rax,%rdx
.byte 102,73,15,110,216 /* movq %r8,%xmm3: park a[3] */
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
.byte 102,72,15,126,202 /* movq %xmm1,%rdx: reload a[1] */
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
.byte 102,72,15,126,210 /* movq %xmm2,%rdx: reload a[2] */
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
mulxq %rdx,%rcx,%rbp
.byte 0x67 /* address-size prefix, likely instruction-length padding */
.byte 102,72,15,126,218 /* movq %xmm3,%rdx: reload a[3] */
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
adoxq %rbp,%r13
mulxq %rdx,%rcx,%rax
adoxq %rcx,%r14
adoxq %rax,%r15
movq %r8,%rdx
mulxq 32(%rsi),%rdx,%rcx
xorq %rax,%rax
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
adcxq %rax,%r8
movq %r9,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
adoxq %rax,%r9
movq %r10,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
adcxq %rax,%r10
movq %r11,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
adoxq %rax,%r11
addq %r8,%r12
adcq %r13,%r9
movq %r12,%rdx
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%r14
adcq $0,%rax
subq 0(%rsi),%r12
movq %r10,%r15
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r8
sbbq 24(%rsi),%r11
sbbq $0,%rax
cmovncq %r12,%rdx
cmovncq %r9,%r14
cmovncq %r10,%r15
cmovncq %r11,%r8
decq %rbx
jnz .Loop_ord_sqrx
movq %rdx,0(%rdi)
movq %r14,8(%rdi)
pxor %xmm1,%xmm1
movq %r15,16(%rdi)
pxor %xmm2,%xmm2
movq %r8,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_sqrx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_sqr_mont_adx,.-ecp_nistz256_ord_sqr_mont_adx
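/*
 * ecp_nistz256_mul_mont_nohw(res, a, b): Montgomery multiplication modulo
 * the field prime p = 2^256 - 2^224 + 2^192 + 2^96 - 1.  Thin
 * prologue/epilogue around __ecp_nistz256_mul_montq.
 */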
.globl ecp_nistz256_mul_mont_nohw
.hidden ecp_nistz256_mul_mont_nohw
.type ecp_nistz256_mul_mont_nohw,@function
.align 32
ecp_nistz256_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lmul_body:
movq %rdx,%rbx
movq 0(%rdx),%rax
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
call __ecp_nistz256_mul_montq
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lmul_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_mul_mont_nohw,.-ecp_nistz256_mul_mont_nohw
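/*
 * __ecp_nistz256_mul_montq: inner worker, entered with rax = b[0],
 * rbx -> b, rsi -> a and r9..r12 = a[0..3].  Four multiply-by-limb passes
 * are interleaved with four reduction steps; because p[0] = 2^64-1,
 * p[2] = 0 and p[1]/p[3] have 32-bit structure, each reduction step needs
 * only shifts by 32 plus a single mulq by .Lpoly+24.  A final conditional
 * subtraction of p (the cmovc chain at the end) normalizes the result.
 */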
.type __ecp_nistz256_mul_montq,@function
.align 32
__ecp_nistz256_mul_montq:
.cfi_startproc
movq %rax,%rbp
mulq %r9
movq .Lpoly+8(%rip),%r14
movq %rax,%r8
movq %rbp,%rax
movq %rdx,%r9
mulq %r10
movq .Lpoly+24(%rip),%r15
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %r11
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r12
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
xorq %r13,%r13
movq %rdx,%r12
movq %r8,%rbp
shlq $32,%r8
mulq %r15
shrq $32,%rbp
addq %r8,%r9
adcq %rbp,%r10
adcq %rax,%r11
movq 8(%rbx),%rax
adcq %rdx,%r12
adcq $0,%r13
xorq %r8,%r8
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
movq %r9,%rbp
shlq $32,%r9
mulq %r15
shrq $32,%rbp
addq %r9,%r10
adcq %rbp,%r11
adcq %rax,%r12
movq 16(%rbx),%rax
adcq %rdx,%r13
adcq $0,%r8
xorq %r9,%r9
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
movq %r10,%rbp
shlq $32,%r10
mulq %r15
shrq $32,%rbp
addq %r10,%r11
adcq %rbp,%r12
adcq %rax,%r13
movq 24(%rbx),%rax
adcq %rdx,%r8
adcq $0,%r9
xorq %r10,%r10
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
movq %r11,%rbp
shlq $32,%r11
mulq %r15
shrq $32,%rbp
addq %r11,%r12
adcq %rbp,%r13
movq %r12,%rcx
adcq %rax,%r8
adcq %rdx,%r9
movq %r13,%rbp
adcq $0,%r10
subq $-1,%r12
movq %r8,%rbx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rdx
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rcx,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rbx,%r8
movq %r13,8(%rdi)
cmovcq %rdx,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
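/*
 * ecp_nistz256_sqr_mont_nohw(res, a): Montgomery squaring mod p;
 * wrapper around __ecp_nistz256_sqr_montq.
 */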
.globl ecp_nistz256_sqr_mont_nohw
.hidden ecp_nistz256_sqr_mont_nohw
.type ecp_nistz256_sqr_mont_nohw,@function
.align 32
ecp_nistz256_sqr_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lsqr_body:
movq 0(%rsi),%rax
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
call __ecp_nistz256_sqr_montq
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lsqr_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_sqr_mont_nohw,.-ecp_nistz256_sqr_mont_nohw
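/*
 * __ecp_nistz256_sqr_montq: computes the off-diagonal products a[i]*a[j],
 * doubles them with the add/adc ladder, adds the diagonal squares a[i]^2,
 * then runs four shift-based reduction steps (same trick as mul_montq)
 * and a final conditional subtraction of p.  Leaves .Lpoly+8 in rsi and
 * .Lpoly+24 in rbp, which ecp_nistz256_point_double_nohw relies on after
 * the call.
 */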
.type __ecp_nistz256_sqr_montq,@function
.align 32
__ecp_nistz256_sqr_montq:
.cfi_startproc
movq %rax,%r13
mulq %r14
movq %rax,%r9
movq %r15,%rax
movq %rdx,%r10
mulq %r13
addq %rax,%r10
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r13
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq %r14
addq %rax,%r12
movq %r8,%rax
adcq $0,%rdx
addq %rbp,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r15
xorq %r15,%r15
addq %rax,%r13
movq 0(%rsi),%rax
movq %rdx,%r14
adcq $0,%r14
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
movq 8(%rsi),%rax
movq %rdx,%rcx
mulq %rax
addq %rcx,%r9
adcq %rax,%r10
movq 16(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r11
adcq %rax,%r12
movq 24(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r13
adcq %rax,%r14
movq %r8,%rax
adcq %rdx,%r15
movq .Lpoly+8(%rip),%rsi
movq .Lpoly+24(%rip),%rbp
movq %r8,%rcx
shlq $32,%r8
mulq %rbp
shrq $32,%rcx
addq %r8,%r9
adcq %rcx,%r10
adcq %rax,%r11
movq %r9,%rax
adcq $0,%rdx
movq %r9,%rcx
shlq $32,%r9
movq %rdx,%r8
mulq %rbp
shrq $32,%rcx
addq %r9,%r10
adcq %rcx,%r11
adcq %rax,%r8
movq %r10,%rax
adcq $0,%rdx
movq %r10,%rcx
shlq $32,%r10
movq %rdx,%r9
mulq %rbp
shrq $32,%rcx
addq %r10,%r11
adcq %rcx,%r8
adcq %rax,%r9
movq %r11,%rax
adcq $0,%rdx
movq %r11,%rcx
shlq $32,%r11
movq %rdx,%r10
mulq %rbp
shrq $32,%rcx
addq %r11,%r8
adcq %rcx,%r9
adcq %rax,%r10
adcq $0,%rdx
xorq %r11,%r11
addq %r8,%r12
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %rdx,%r15
movq %r13,%r9
adcq $0,%r11
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%rcx
sbbq %rbp,%r15
sbbq $0,%r11
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %rcx,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
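/*
 * ecp_nistz256_mul_mont_adx(res, a, b): ADX/BMI2 entry point for field
 * multiplication; biases the a pointer by -128 before calling the worker.
 */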
.globl ecp_nistz256_mul_mont_adx
.hidden ecp_nistz256_mul_mont_adx
.type ecp_nistz256_mul_mont_adx,@function
.align 32
ecp_nistz256_mul_mont_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lmulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lmulx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_mul_mont_adx,.-ecp_nistz256_mul_mont_adx
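/*
 * __ecp_nistz256_mul_montx: mulx/adcx/adox version of mul_montq.
 * Entered with rdx = b[0], rbx -> b, rsi -> a-128, r9..r12 = a[0..3];
 * r14 holds the shift count 32 used by the shlx/shrx reduction steps.
 */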
.type __ecp_nistz256_mul_montx,@function
.align 32
__ecp_nistz256_mul_montx:
.cfi_startproc
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
movq $32,%r14
xorq %r13,%r13
mulxq %r11,%rbp,%r11
movq .Lpoly+24(%rip),%r15
adcq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
adcq %rbp,%r10
shlxq %r14,%r8,%rbp
adcq %rcx,%r11
shrxq %r14,%r8,%rcx
adcq $0,%r12
addq %rbp,%r9
adcq %rcx,%r10
mulxq %r15,%rcx,%rbp
movq 8(%rbx),%rdx
adcq %rcx,%r11
adcq %rbp,%r12
adcq $0,%r13
xorq %r8,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
adcxq %rcx,%r12
shlxq %r14,%r9,%rcx
adoxq %rbp,%r13
shrxq %r14,%r9,%rbp
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
addq %rcx,%r10
adcq %rbp,%r11
mulxq %r15,%rcx,%rbp
movq 16(%rbx),%rdx
adcq %rcx,%r12
adcq %rbp,%r13
adcq $0,%r8
xorq %r9,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
adcxq %rcx,%r13
shlxq %r14,%r10,%rcx
adoxq %rbp,%r8
shrxq %r14,%r10,%rbp
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
addq %rcx,%r11
adcq %rbp,%r12
mulxq %r15,%rcx,%rbp
movq 24(%rbx),%rdx
adcq %rcx,%r13
adcq %rbp,%r8
adcq $0,%r9
xorq %r10,%r10
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
adcxq %rcx,%r8
shlxq %r14,%r11,%rcx
adoxq %rbp,%r9
shrxq %r14,%r11,%rbp
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
addq %rcx,%r12
adcq %rbp,%r13
mulxq %r15,%rcx,%rbp
movq %r12,%rbx
movq .Lpoly+8(%rip),%r14
adcq %rcx,%r8
movq %r13,%rdx
adcq %rbp,%r9
adcq $0,%r10
xorl %eax,%eax /* clears CF so the sbbq chain below starts clean */
movq %r8,%rcx
sbbq $-1,%r12
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rbp
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %rbp,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
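/*
 * ecp_nistz256_sqr_mont_adx(res, a): ADX/BMI2 entry point for field
 * squaring; wrapper around __ecp_nistz256_sqr_montx.
 */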
.globl ecp_nistz256_sqr_mont_adx
.hidden ecp_nistz256_sqr_mont_adx
.type ecp_nistz256_sqr_mont_adx,@function
.align 32
ecp_nistz256_sqr_mont_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lsqrx_body:
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq -128(%rsi),%rsi
call __ecp_nistz256_sqr_montx
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lsqrx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_sqr_mont_adx,.-ecp_nistz256_sqr_mont_adx
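/*
 * __ecp_nistz256_sqr_montx: mulx-based squaring.  rsi arrives biased by
 * -128 and, once all loads are done, is repurposed to hold the shift
 * count 32; on return rsi carries .Lpoly+8 and rbp .Lpoly+24, matching
 * what the _adx point routines expect after the call.
 */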
.type __ecp_nistz256_sqr_montx,@function
.align 32
__ecp_nistz256_sqr_montx:
.cfi_startproc
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
xorl %eax,%eax
adcq %rcx,%r10
mulxq %r8,%rbp,%r12
movq %r14,%rdx
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq 0+128(%rsi),%rdx
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
movq 8+128(%rsi),%rdx
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
movq 16+128(%rsi),%rdx
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
.byte 0x67 /* address-size prefix, likely instruction-length padding */
mulxq %rdx,%rcx,%rbp
movq 24+128(%rsi),%rdx
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
movq $32,%rsi
adoxq %rbp,%r13
.byte 0x67,0x67 /* likely instruction-length padding */
mulxq %rdx,%rcx,%rax
movq .Lpoly+24(%rip),%rdx
adoxq %rcx,%r14
shlxq %rsi,%r8,%rcx
adoxq %rax,%r15
shrxq %rsi,%r8,%rax
movq %rdx,%rbp
addq %rcx,%r9
adcq %rax,%r10
mulxq %r8,%rcx,%r8
adcq %rcx,%r11
shlxq %rsi,%r9,%rcx
adcq $0,%r8
shrxq %rsi,%r9,%rax
addq %rcx,%r10
adcq %rax,%r11
mulxq %r9,%rcx,%r9
adcq %rcx,%r8
shlxq %rsi,%r10,%rcx
adcq $0,%r9
shrxq %rsi,%r10,%rax
addq %rcx,%r11
adcq %rax,%r8
mulxq %r10,%rcx,%r10
adcq %rcx,%r9
shlxq %rsi,%r11,%rcx
adcq $0,%r10
shrxq %rsi,%r11,%rax
addq %rcx,%r8
adcq %rax,%r9
mulxq %r11,%rcx,%r11
adcq %rcx,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r8,%r12
movq .Lpoly+8(%rip),%rsi
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %r11,%r15
movq %r13,%r9
adcq $0,%rdx
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%r11
sbbq %rbp,%r15
sbbq $0,%rdx
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %r11,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
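/*
 * ecp_nistz256_select_w5_nohw(val, in_t, index): constant-time lookup of
 * one Jacobian point (3 x 32 bytes) from a 16-entry table.  Every entry
 * is read and masked with a pcmpeqd-derived mask, so the memory access
 * pattern is independent of the secret index.  Entries are numbered from
 * 1; index 0 matches nothing and yields an all-zero (infinity) output.
 */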
.globl ecp_nistz256_select_w5_nohw
.hidden ecp_nistz256_select_w5_nohw
.type ecp_nistz256_select_w5_nohw,@function
.align 32
ecp_nistz256_select_w5_nohw:
.cfi_startproc
_CET_ENDBR
movdqa .LOne(%rip),%xmm0
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movdqa %xmm0,%xmm8
pshufd $0,%xmm1,%xmm1
movq $16,%rax
.Lselect_loop_sse_w5:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
pcmpeqd %xmm1,%xmm15
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
movdqa 64(%rsi),%xmm13
movdqa 80(%rsi),%xmm14
leaq 96(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
pand %xmm15,%xmm13
por %xmm12,%xmm5
pand %xmm15,%xmm14
por %xmm13,%xmm6
por %xmm14,%xmm7
decq %rax
jnz .Lselect_loop_sse_w5
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
movdqu %xmm6,64(%rdi)
movdqu %xmm7,80(%rdi)
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w5_nohw:
.size ecp_nistz256_select_w5_nohw,.-ecp_nistz256_select_w5_nohw
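/*
 * ecp_nistz256_select_w7_nohw(val, in_t, index): as select_w5, but over a
 * 64-entry table of affine points (2 x 32 bytes each).
 */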
.globl ecp_nistz256_select_w7_nohw
.hidden ecp_nistz256_select_w7_nohw
.type ecp_nistz256_select_w7_nohw,@function
.align 32
ecp_nistz256_select_w7_nohw:
.cfi_startproc
_CET_ENDBR
movdqa .LOne(%rip),%xmm8
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa %xmm8,%xmm0
pshufd $0,%xmm1,%xmm1
movq $64,%rax
.Lselect_loop_sse_w7:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
pcmpeqd %xmm1,%xmm15
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
leaq 64(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
prefetcht0 255(%rsi)
por %xmm12,%xmm5
decq %rax
jnz .Lselect_loop_sse_w7
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w7_nohw:
.size ecp_nistz256_select_w7_nohw,.-ecp_nistz256_select_w7_nohw
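/*
 * ecp_nistz256_select_w5_avx2: AVX2 version of the w5 lookup; each of the
 * 8 loop iterations scans two 96-byte table entries, comparing the
 * broadcast index against two running counters (ymm5 and ymm10).
 */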
.globl ecp_nistz256_select_w5_avx2
.hidden ecp_nistz256_select_w5_avx2
.type ecp_nistz256_select_w5_avx2,@function
.align 32
ecp_nistz256_select_w5_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqa .LTwo(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vpxor %ymm4,%ymm4,%ymm4
vmovdqa .LOne(%rip),%ymm5
vmovdqa .LTwo(%rip),%ymm10
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $8,%rax
.Lselect_loop_avx2_w5:
vmovdqa 0(%rsi),%ymm6
vmovdqa 32(%rsi),%ymm7
vmovdqa 64(%rsi),%ymm8
vmovdqa 96(%rsi),%ymm11
vmovdqa 128(%rsi),%ymm12
vmovdqa 160(%rsi),%ymm13
vpcmpeqd %ymm1,%ymm5,%ymm9
vpcmpeqd %ymm1,%ymm10,%ymm14
vpaddd %ymm0,%ymm5,%ymm5
vpaddd %ymm0,%ymm10,%ymm10
leaq 192(%rsi),%rsi
vpand %ymm9,%ymm6,%ymm6
vpand %ymm9,%ymm7,%ymm7
vpand %ymm9,%ymm8,%ymm8
vpand %ymm14,%ymm11,%ymm11
vpand %ymm14,%ymm12,%ymm12
vpand %ymm14,%ymm13,%ymm13
vpxor %ymm6,%ymm2,%ymm2
vpxor %ymm7,%ymm3,%ymm3
vpxor %ymm8,%ymm4,%ymm4
vpxor %ymm11,%ymm2,%ymm2
vpxor %ymm12,%ymm3,%ymm3
vpxor %ymm13,%ymm4,%ymm4
decq %rax
jnz .Lselect_loop_avx2_w5
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,64(%rdi)
vzeroupper
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w5_avx2:
.size ecp_nistz256_select_w5_avx2,.-ecp_nistz256_select_w5_avx2
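/*
 * ecp_nistz256_select_w7_avx2: scans the 64-entry affine table three
 * entries per iteration (21 iterations), then handles the 64th entry in
 * the tail after the loop.
 */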
.globl ecp_nistz256_select_w7_avx2
.hidden ecp_nistz256_select_w7_avx2
.type ecp_nistz256_select_w7_avx2,@function
.align 32
ecp_nistz256_select_w7_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqa .LThree(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vmovdqa .LOne(%rip),%ymm4
vmovdqa .LTwo(%rip),%ymm8
vmovdqa .LThree(%rip),%ymm12
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $21,%rax
.Lselect_loop_avx2_w7:
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm9
vmovdqa 96(%rsi),%ymm10
vmovdqa 128(%rsi),%ymm13
vmovdqa 160(%rsi),%ymm14
vpcmpeqd %ymm1,%ymm4,%ymm7
vpcmpeqd %ymm1,%ymm8,%ymm11
vpcmpeqd %ymm1,%ymm12,%ymm15
vpaddd %ymm0,%ymm4,%ymm4
vpaddd %ymm0,%ymm8,%ymm8
vpaddd %ymm0,%ymm12,%ymm12
leaq 192(%rsi),%rsi
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpand %ymm11,%ymm9,%ymm9
vpand %ymm11,%ymm10,%ymm10
vpand %ymm15,%ymm13,%ymm13
vpand %ymm15,%ymm14,%ymm14
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vpxor %ymm9,%ymm2,%ymm2
vpxor %ymm10,%ymm3,%ymm3
vpxor %ymm13,%ymm2,%ymm2
vpxor %ymm14,%ymm3,%ymm3
decq %rax
jnz .Lselect_loop_avx2_w7
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vpcmpeqd %ymm1,%ymm4,%ymm7
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vzeroupper
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w7_avx2:
.size ecp_nistz256_select_w7_avx2,.-ecp_nistz256_select_w7_avx2
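/*
 * __ecp_nistz256_add_toq: r12:r13:r8:r9 += (%rbx) mod p, stored to (%rdi)
 * and left in registers.  r14/r15 must hold .Lpoly+8/.Lpoly+24; the
 * subtraction of p[0] = 2^64-1 is encoded as subq $-1.
 */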
.type __ecp_nistz256_add_toq,@function
.align 32
__ecp_nistz256_add_toq:
.cfi_startproc
xorq %r11,%r11
addq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
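/*
 * __ecp_nistz256_sub_fromq: r12:r13:r8:r9 -= (%rbx) mod p.  The "+p"
 * correction is applied unconditionally; the raw difference is restored
 * via cmovz when there was no borrow.  Result is stored to (%rdi) and
 * left in registers.
 */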
.type __ecp_nistz256_sub_fromq,@function
.align 32
__ecp_nistz256_sub_fromq:
.cfi_startproc
subq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq %r11,%r11
addq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
testq %r11,%r11
cmovzq %rax,%r12
cmovzq %rbp,%r13
movq %r12,0(%rdi)
cmovzq %rcx,%r8
movq %r13,8(%rdi)
cmovzq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
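/*
 * __ecp_nistz256_subq: rax:rbp:rcx:r10 -= r12:r13:r8:r9 mod p, result
 * returned in r12:r13:r8:r9 only (no store).
 */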
.type __ecp_nistz256_subq,@function
.align 32
__ecp_nistz256_subq:
.cfi_startproc
subq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq %r11,%r11
addq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
testq %r11,%r11
cmovnzq %rax,%r12
cmovnzq %rbp,%r13
cmovnzq %rcx,%r8
cmovnzq %r10,%r9
ret
.cfi_endproc
.size __ecp_nistz256_subq,.-__ecp_nistz256_subq
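/*
 * __ecp_nistz256_mul_by_2q: doubles r12:r13:r8:r9 mod p and stores the
 * result to (%rdi), leaving it in registers as well.
 */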
.type __ecp_nistz256_mul_by_2q,@function
.align 32
__ecp_nistz256_mul_by_2q:
.cfi_startproc
xorq %r11,%r11
addq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
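/*
 * ecp_nistz256_point_double_nohw(res, in): point doubling in Jacobian
 * coordinates.  The 160-byte frame holds five 32-byte temporaries at
 * 0/32/64/96/128(%rsp).  .Lpoint_double_shortcutq is also entered
 * directly from the full adder when the two addends turn out to be equal.
 */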
.globl ecp_nistz256_point_double_nohw
.hidden ecp_nistz256_point_double_nohw
.type ecp_nistz256_point_double_nohw,@function
.align 32
ecp_nistz256_point_double_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $160+8,%rsp
.cfi_adjust_cfa_offset 32*5+8
.Lpoint_doubleq_body:
.Lpoint_double_shortcutq:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq .Lpoly+8(%rip),%r14
movq .Lpoly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199 /* movq %rdi,%xmm0: stash &res->X */
.byte 102,73,15,110,202 /* movq %r10,%xmm1: stash &res->Y */
.byte 102,73,15,110,211 /* movq %r11,%xmm2: stash &res->Z */
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-0(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 32(%rbx),%rax
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-0(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215 /* movq %xmm2,%rdi: output goes to res->Z */
call __ecp_nistz256_mul_montq
call __ecp_nistz256_mul_by_2q
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207 /* movq %xmm1,%rdi: output goes to res->Y */
call __ecp_nistz256_sqr_montq
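/*
 * Halve modulo p: compute v+p (carry into r9), keep plain v when the low
 * bit shows v is even, then shift the 257-bit value right by one.  p[1]
 * and p[3] are still live in rsi/rbp from __ecp_nistz256_sqr_montq.
 */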
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rax
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 0+32(%rsp),%rax
movq 8+32(%rsp),%r14
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199 /* movq %xmm0,%rdi: output goes to res->X */
call __ecp_nistz256_sqr_montq
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subq
movq 32(%rsp),%rax
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-0(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromq
leaq 160+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_doubleq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_double_nohw,.-ecp_nistz256_point_double_nohw
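/*
 * ecp_nistz256_point_add_nohw(res, a, b): full Jacobian point addition
 * with 576 bytes of stack temporaries.  Infinity flags for both inputs
 * are accumulated in xmm4/xmm5 and drive the masked selection of the
 * final result.  When U1 == U2 and S1 == S2 the code re-enters the
 * doubling path at .Lpoint_double_shortcutq; when the points are inverses
 * of each other the output is zeroed (point at infinity).
 */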
.globl ecp_nistz256_point_add_nohw
.hidden ecp_nistz256_point_add_nohw
.type ecp_nistz256_point_add_nohw,@function
.align 32
ecp_nistz256_point_add_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $576+8,%rsp
.cfi_adjust_cfa_offset 32*18+8
.Lpoint_addq_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-0(%rsi),%rsi
movq %rax,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rax
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-0(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 416(%rsp),%rax
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 512(%rsp),%rax
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq 0+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 480(%rsp),%rax
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e /* DS prefix: legacy branch-taken hint for the jnz below */
jnz .Ladd_proceedq
testq %r9,%r9
jz .Ladd_doubleq
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp .Ladd_doneq
.align 32
.Ladd_doubleq:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
.cfi_adjust_cfa_offset -416
jmp .Lpoint_double_shortcutq
.cfi_adjust_cfa_offset 416
.align 32
.Ladd_proceedq:
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq 0+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0(%rsp),%rax
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 160(%rsp),%rax
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
.Ladd_doneq:
leaq 576+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_addq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_nohw,.-ecp_nistz256_point_add_nohw
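/*
 * ecp_nistz256_point_add_affine_nohw(res, a, b): mixed addition where the
 * second input is affine (implicit Z = 1, supplied as .LONE_mont in the
 * final masked selection).  Unlike the full adder there is no doubling
 * shortcut here; callers are expected to avoid the a == b case.
 */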
.globl ecp_nistz256_point_add_affine_nohw
.hidden ecp_nistz256_point_add_affine_nohw
.type ecp_nistz256_point_add_affine_nohw,@function
.align 32
ecp_nistz256_point_add_affine_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $480+8,%rsp
.cfi_adjust_cfa_offset 32*15+8
.Ladd_affineq_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-0(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rax
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-0(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+96(%rsp),%rax
movq 8+96(%rsp),%r14
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq 0+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rax
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq 0+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand .LONE_mont(%rip),%xmm2
pand .LONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Ladd_affineq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_affine_nohw,.-ecp_nistz256_point_add_affine_nohw
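/*
 * The *_tox/_fromx/_subx/_by_2x helpers below are the arithmetic twins of
 * the q versions, used by the ADX code path.  Each leads with an xor,
 * which clears CF, so the adcq/sbbq that follows behaves like a plain
 * add/sub; where the q subtract helpers test the borrow mask with testq,
 * these use btq $0.
 */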
.type __ecp_nistz256_add_tox,@function
.align 32
__ecp_nistz256_add_tox:
.cfi_startproc
xorq %r11,%r11
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
.type __ecp_nistz256_sub_fromx,@function
.align 32
__ecp_nistz256_sub_fromx:
.cfi_startproc
xorq %r11,%r11
sbbq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq $0,%r11
xorq %r10,%r10
adcq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
btq $0,%r11
cmovncq %rax,%r12
cmovncq %rbp,%r13
movq %r12,0(%rdi)
cmovncq %rcx,%r8
movq %r13,8(%rdi)
cmovncq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
.type __ecp_nistz256_subx,@function
.align 32
__ecp_nistz256_subx:
.cfi_startproc
xorq %r11,%r11
sbbq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq $0,%r11
xorq %r9,%r9
adcq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
btq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
cmovcq %rcx,%r8
cmovcq %r10,%r9
ret
.cfi_endproc
.size __ecp_nistz256_subx,.-__ecp_nistz256_subx
.type __ecp_nistz256_mul_by_2x,@function
.align 32
__ecp_nistz256_mul_by_2x:
.cfi_startproc
xorq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
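/*
 * ecp_nistz256_point_double_adx: same algorithm as
 * ecp_nistz256_point_double_nohw, wired to the mulx/adcx/adox workers
 * (note the -128 pointer bias on every rsi handed to them).
 */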
.globl ecp_nistz256_point_double_adx
.hidden ecp_nistz256_point_double_adx
.type ecp_nistz256_point_double_adx,@function
.align 32
ecp_nistz256_point_double_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $160+8,%rsp
.cfi_adjust_cfa_offset 32*5+8
.Lpoint_doublex_body:
.Lpoint_double_shortcutx:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq .Lpoly+8(%rip),%r14
movq .Lpoly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-128(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 32(%rbx),%rdx
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-128(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montx
call __ecp_nistz256_mul_by_2x
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montx
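/*
 * Same modular halving as in the _nohw double; rsi/rbp still carry
 * .Lpoly+8/.Lpoly+24 from __ecp_nistz256_sqr_montx.
 */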
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rdx
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 0+32(%rsp),%rdx
movq 8+32(%rsp),%r14
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montx
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subx
movq 32(%rsp),%rdx
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-128(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromx
leaq 160+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_doublex_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_double_adx,.-ecp_nistz256_point_double_adx
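/*
 * ecp_nistz256_point_add_adx: ADX/BMI2 build of the full point adder;
 * control flow (doubling shortcut, infinity handling) mirrors
 * ecp_nistz256_point_add_nohw.
 */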
.globl ecp_nistz256_point_add_adx
.hidden ecp_nistz256_point_add_adx
.type ecp_nistz256_point_add_adx,@function
.align 32
ecp_nistz256_point_add_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $576+8,%rsp
.cfi_adjust_cfa_offset 32*18+8
.Lpoint_addx_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-128(%rsi),%rsi
movq %rdx,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rdx
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-128(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 416(%rsp),%rdx
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 512(%rsp),%rdx
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq -128+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 480(%rsp),%rdx
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208
.byte 102,73,15,126,217
orq %r8,%r12
.byte 0x3e /* DS prefix: legacy branch-taken hint for the jnz below */
jnz .Ladd_proceedx
testq %r9,%r9
jz .Ladd_doublex
.byte 102,72,15,126,199
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp .Ladd_donex
.align 32
.Ladd_doublex:
.byte 102,72,15,126,206
.byte 102,72,15,126,199
addq $416,%rsp
.cfi_adjust_cfa_offset -416
jmp .Lpoint_double_shortcutx
.cfi_adjust_cfa_offset 416
.align 32
.Ladd_proceedx:
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq -128+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%rdx
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 160(%rsp),%rdx
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
.Ladd_donex:
leaq 576+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_addx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_adx,.-ecp_nistz256_point_add_adx
.globl ecp_nistz256_point_add_affine_adx
.hidden ecp_nistz256_point_add_affine_adx
.type ecp_nistz256_point_add_affine_adx,@function
.align 32
ecp_nistz256_point_add_affine_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $480+8,%rsp
.cfi_adjust_cfa_offset 32*15+8
.Ladd_affinex_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-128(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rdx
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-128(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+96(%rsp),%rdx
movq 8+96(%rsp),%r14
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq -128+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rdx
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq -128+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand .LONE_mont(%rip),%xmm2
pand .LONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Ladd_affinex_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_affine_adx,.-ecp_nistz256_point_add_affine_adx
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 30,650
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/armv8-mont-win64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
.globl bn_mul_mont_nohw
.def bn_mul_mont_nohw
.type 32
.endef
.align 5
bn_mul_mont_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) On the removal of the first multiplication and addition
// instructions: the outcome of the first addition is
// guaranteed to be zero, which leaves two computationally
// significant outcomes: it either carries or it doesn't.
// So when does it carry? Is there an alternative way to
// deduce it? If you follow the operations, you can observe
// that the condition for a carry is quite simple: x6 being
// non-zero. The carry can therefore be calculated by adding
// -1 to x6, which is what the next instruction does.
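// For illustration: the omitted "adds x12,x12,x6" would compute
// x12 + x6 where x12 == (2^64 - x6) mod 2^64, so it carries exactly
// when x6 != 0. "subs xzr,x6,#1" sets C = (x6 >= 1), i.e. the same
// predicate, without needing the discarded sum.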
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step. We check whether the result is larger than the
// modulus and, if it is, subtract the modulus. But comparison
// implies subtraction, so we subtract the modulus, check whether
// the subtraction borrowed, and conditionally copy the original
// value back.
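// Roughly: tmp = tp - np; rp = borrow ? tp : tmp. The sbcs chain
// below produces tmp, and Lcond_copy selects with csel on the
// borrow ("lo") condition while wiping tp.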
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_sqr8x_mont
.def bn_sqr8x_mont
.type 32
.endef
.align 5
bn_sqr8x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
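// The (i)..(vii) groups above enumerate every cross product
// a[j]*a[i] with j > i for one eight-limb window; the code below
// accumulates their low and high halves into t[1..14].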
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewound ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
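// Sketch of the identity used here, with a = sum_i a[i]*2^(64*i):
// a^2 = sum_i a[i]^2 * 2^(128*i) + 2 * sum_{i<j} a[i]*a[j]*2^(64*(i+j));
// the doubling is done with the extr-based one-bit left shifts in
// Lsqr4x_shift_n_add below.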
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
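// Sketch: each step of Lsqr8x_reduction adds n[] scaled by a suitable
// factor (t[0]*n0) so the bottom limb of t[] vanishes; eight such
// steps retire one 512-bit window of t[] per iteration.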
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewound np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step. We check whether the result is larger than the
// modulus and, if it is, subtract the modulus. But comparison
// implies subtraction, so we subtract the modulus, check whether
// the subtraction borrowed, and conditionally copy the original
// value back.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-x26,x28 hold result, x6-x13 hold modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-x13 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_mul4x_mont
.def bn_mul4x_mont
.type 32
.endef
.align 5
bn_mul4x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
cbz x10,Lmul4x4_post_condition
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
sub x11,x27,x5 // rewound x1
cbz x10,Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0)
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
sub x11,x3,x5 // rewound np?
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step. We check whether the result is larger than the
// modulus and, if it is, subtract the modulus. But comparison
// implies subtraction, so we subtract the modulus, check whether
// the subtraction borrowed, and conditionally copy the original
// value back.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-x22,x0 hold result, x14-x17 hold modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-x9 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
mktmansour/MKT-KSA-Geolocation-Security
| 73,987
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha20_poly1305_armv8-win64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.section .rodata
.align 7
Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
Linc:
.long 1,2,3,4
Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.text
.def Lpoly_hash_ad_internal
.type 32
.endef
.align 6
Lpoly_hash_ad_internal:
.cfi_startproc
cbnz x4, Lpoly_hash_intro
ret
Lpoly_hash_intro:
cmp x4, #16
b.lt Lpoly_hash_ad_tail
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
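// Reduction sketch: with p = 2^130 - 5, 2^130 == 5 (mod p), so the
// bits above 2^130, b = [t3:t2] >> 2 (t3:t2 = x14:x13), fold back in
// as 5*b = 4*b + b; the and #-4 / extr #2 / lsr #2 trio above supplies
// 4*b and b without any division.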
sub x4, x4, #16
b Lpoly_hash_ad_internal
Lpoly_hash_ad_tail:
cbz x4, Lpoly_hash_ad_ret
eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
sub x4, x4, #1
Lpoly_hash_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, x4]
mov v20.b[0], w11
subs x4, x4, #1
b.ge Lpoly_hash_tail_16_compose
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lpoly_hash_ad_ret:
ret
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
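// Per AAPCS64 (our reading of the code below): x0=pt, x1=ct,
// x2=len_in, x3=ad, x4=len_ad, x5=seal_data.
//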
.globl chacha20_poly1305_seal
.def chacha20_poly1305_seal
.type 32
.endef
.align 6
chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
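// "Vertically" means lane i of each vertical-state register (v0-v3,
// v5-v8, v10-v13, v15-v18) holds one state word of block i: ld4r
// splats each word across lanes and adding v25 (Linc) staggers the
// four counters, so each NEON op advances four blocks at once; the
// fifth block (v4,v9,v14,v19) is kept in whole registers.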
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
bl Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds
mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256
Lseal_main_loop:
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b // zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
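// Net effect (sketch): v20 is zero in every lane except the one that
// lands on v19's counter word, so this add bumps only the counter of
// the fifth (horizontal) block.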
sub x5, x5, #32
.align 5
Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge Lseal_main_loop_rounds
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b // zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b Lseal_main_loop
Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
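// Tail flow, as a reader's sketch (not from the original source):
//   while (inl >= 64)  encrypt and hash a full 64-byte chunk      (loop below);
//   while (inl >= 16)  encrypt and hash a 16-byte block           (Lseal_tail_64);
//   if    (inl  >  0)  pad the last block with extra_in or zeros,
//                      hash it, and store only the real ciphertext (Lseal_tail_16).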
cmp x2, #64
b.lt Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b Lseal_tail
Lseal_tail_64:
ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt Lseal_tail_16
// Each iteration encrypt and authenticate a 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b Lseal_tail_64
Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
cbz x2, Lseal_hash_extra
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
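// Reader's sketch (not from the original source): x7 = min(16 - inl, extra_in_len)
// bytes of extra_in are rotated into the block first (read back to front), then
// the remaining inl ciphertext bytes, so the hashed block ends up laid out as
//   [ ct tail (inl bytes) | extra_in prefix (x7 bytes) | zero padding ].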
Lseal_tail16_compose_extra_in:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt Lseal_tail16_compose_extra_in
add x3, x3, x12
Lseal_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_hash_extra:
cbz x4, Lseal_finalize
Lseal_hash_extra_loop:
cmp x4, #16
b.lt Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b Lseal_hash_extra_loop
Lseal_hash_extra_tail:
cbz x4, Lseal_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext
add x3, x3, x4
Lseal_hash_extra_load:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
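// Reader's sketch (not from the original source), with p = 2^130 - 5:
//   t   = acc - p;              // three-limb subtract: limb0 + 5, limb1 + 1, limb2 - 3
//   acc = (acc >= p) ? t : acc; // csel on carry-set keeps the reduced value
//   tag = (acc + s) mod 2^128;  // s is the saved second key half (v27), stored below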
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
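// Reader's sketch (not from the original source): this path generates only
// three ChaCha20 blocks. The counter+0 block supplies the Poly1305 R and S
// keys (its first 32 bytes); counter+1 and counter+2 supply up to 128 bytes
// of keystream for the data.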
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lseal_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
b Lseal_tail
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
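// Reader's note (not part of the generated comments) — argument registers on
// entry, per AAPCS64, as used below:
//   x0 = pt (output), x1 = ct (input), x2 = len_in, x3 = ad,
//   x4 = len_ad, x5 = aead_data (key/nonce in, computed tag out)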
.globl chacha20_poly1305_open
.def chacha20_poly1305_open
.type 32
.endef
.align 6
chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2
cmp x2, #128
b.le Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
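// Reader's sketch (not from the original source): the first 32 bytes of the
// counter-0 keystream block become the one-time Poly1305 key,
//   r = le128(block[0..15]) & 0x0ffffffc0ffffffc0ffffffc0fffffff  // CLAMP mask (v27)
//   s = block[16..31]                                             // then saved in v27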
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
mov x6, #10
.align 5
Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hashes 320 bytes and prepares the stream for 320 bytes
Lopen_main_loop:
cmp x2, #192
b.lt Lopen_tail
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12
sub x4, x4, #10
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
cbz x7, Lopen_main_loop_rounds_short
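// Reader's sketch (not from the original source): rounds entered at
// Lopen_main_loop_rounds absorb two 16-byte Poly1305 blocks per ChaCha20
// double-round (one ldp at the top, one mid-round), while rounds entered at
// Lopen_main_loop_rounds_short absorb only the mid-round block; x7 = min(10, x4)
// and x6 = 10 - x4 split the 10 double-rounds between the two entry points.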
.align 5
Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt Lopen_main_loop_rounds
subs x6, x6, #1
b.ge Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes: the check at the top of the loop guaranteed at least 192 bytes remain
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_main_loop
Lopen_tail:
cbz x2, Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le Lopen_tail_64
cmp x2, #128
b.le Lopen_tail_128
Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, Lopen_tail_192_rounds_no_hash
Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt Lopen_tail_192_rounds
subs x6, x6, #1
b.ge Lopen_tail_192_rounds_no_hash
// We hashed 160 bytes at most, so there may still be up to 32 bytes left to hash
Lopen_tail_192_hash:
cbz x4, Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_tail_192_hash
Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b Lopen_tail_64_store
Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_128_rounds
cbz x4, Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_128_rounds
Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_tail_64_store
Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_64_rounds
cbz x4, Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_64_rounds
Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
Lopen_tail_64_store:
cmp x2, #16
b.lt Lopen_tail_16
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b Lopen_tail_64_store
Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
cbz x2, Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
add x7, x1, x2
mov x6, x2
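// Compose sketch (reader's note, not from the original source): ciphertext
// bytes are rotated into the block from the back, and for every byte loaded a
// 0xff byte is rotated into the mask (T1), so that
//   hashed = block & mask        keeps only the real ciphertext bytes, and
//   pt     = block ^ keystream   is then stored one byte at a time, inl bytes.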
Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lopen_tail_16_store
Lopen_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_128_store:
cmp x2, #64
b.lt Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
Lopen_128_hash_64:
cbz x4, Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_128_hash_64
.cfi_endproc
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ring-0.17.14/pregenerated/aesv8-gcm-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#if __ARM_MAX_ARCH__ >= 8
.arch armv8-a+crypto
.text
.globl aes_gcm_enc_kernel
.def aes_gcm_enc_kernel
.type 32
.endef
.align 4
aes_gcm_enc_kernel:
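// Reader's note (not part of the generated comments) — argument registers per
// AAPCS64, as used below: x0 = plaintext in, x1 = length in *bits*,
// x2 = ciphertext out, x3 = current GHASH state Xi, x4 = counter/IV block,
// x5 = AES key schedule (round count at offset 240), x6 = Htable (powers of H).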
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
add x4, x0, x1, lsr #3 // end_input_ptr
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
sub x5, x5, #1 // byte_len - 1
ldr q18, [x8, #0] // load rk0
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
ldr q25, [x8, #112] // load rk7
add x5, x5, x0
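// Reader's sketch (not from the original source):
//   main_end = in + ((byte_len - 1) & ~63);
// i.e. whole 64-byte (4-block) chunks only, always leaving >= 1 byte of tail.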
lsr x12, x11, #32
fmov d2, x10 // CTR block 2
orr w11, w11, w11
rev w12, w12 // rev_ctr32
fmov d1, x10 // CTR block 1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
add w12, w12, #1 // increment rev_ctr32
rev w9, w12 // CTR block 1
fmov d3, x10 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 1
add w12, w12, #1 // CTR block 1
ldr q19, [x8, #16] // load rk1
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
ldr q20, [x8, #32] // load rk2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
orr x9, x11, x9, lsl #32 // CTR block 3
fmov v3.d[1], x9 // CTR block 3
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q21, [x8, #48] // load rk3
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q24, [x8, #96] // load rk6
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q23, [x8, #80] // load rk5
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q22, [x8, #64] // load rk4
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
ldr q29, [x8, #176] // load rk11
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
ldr q26, [x8, #128] // load rk8
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
add w12, w12, #1 // CTR block 3
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
ldr q27, [x8, #144] // load rk9
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
ldr q28, [x8, #160] // load rk10
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
b.lt Lenc_finish_first_blocks // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
b.eq Lenc_finish_first_blocks // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Lenc_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v2.16b, v31.16b // AES block 2 - round N-1
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
aese v3.16b, v31.16b // AES block 3 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
b.ge Lenc_tail // handle tail
ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
rev w9, w12 // CTR block 4
ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
add x0, x0, #64 // AES input_ptr update
eor x19, x19, x13 // AES block 1 - round N low
eor x20, x20, x14 // AES block 1 - round N high
fmov d5, x19 // AES block 1 - mov low
eor x6, x6, x13 // AES block 0 - round N low
eor x7, x7, x14 // AES block 0 - round N high
eor x24, x24, x14 // AES block 3 - round N high
fmov d4, x6 // AES block 0 - mov low
cmp x0, x5 // check if we have <= 8 blocks
fmov v4.d[1], x7 // AES block 0 - mov high
eor x23, x23, x13 // AES block 3 - round N low
eor x21, x21, x13 // AES block 2 - round N low
fmov v5.d[1], x20 // AES block 1 - mov high
fmov d6, x21 // AES block 2 - mov low
add w12, w12, #1 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov d7, x23 // AES block 3 - mov low
eor x22, x22, x14 // AES block 2 - round N high
fmov v6.d[1], x22 // AES block 2 - mov high
eor v4.16b, v4.16b, v0.16b // AES block 0 - result
fmov d0, x10 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
eor v5.16b, v5.16b, v1.16b // AES block 1 - result
fmov d1, x10 // CTR block 5
orr x9, x11, x9, lsl #32 // CTR block 5
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
st1 { v4.16b}, [x2], #16 // AES block 0 - store result
fmov v7.d[1], x24 // AES block 3 - mov high
orr x9, x11, x9, lsl #32 // CTR block 6
eor v6.16b, v6.16b, v2.16b // AES block 2 - result
st1 { v5.16b}, [x2], #16 // AES block 1 - store result
add w12, w12, #1 // CTR block 6
fmov d2, x10 // CTR block 6
fmov v2.d[1], x9 // CTR block 6
st1 { v6.16b}, [x2], #16 // AES block 2 - store result
rev w9, w12 // CTR block 7
orr x9, x11, x9, lsl #32 // CTR block 7
eor v7.16b, v7.16b, v3.16b // AES block 3 - result
st1 { v7.16b}, [x2], #16 // AES block 3 - store result
b.ge Lenc_prepretail // do prepretail
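// Main encrypt loop: four blocks per iteration, interleaving two chains to
// hide latency. (1) AES-CTR: counter blocks v0-v3 run through the round keys
// in v18-v31 while plaintext is loaded into GPR pairs and XORed in.
// (2) GHASH: the four ciphertext blocks produced by the previous iteration
// (v4-v7) are folded into the accumulator v11 using the powers of H in
// v12-v15, accumulating karatsuba-style low/mid/high partial products in
// v11/v10/v9 before the modular reduction.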
Lenc_main_loop: // main loop start
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d3, x10 // CTR block 4k+3
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
fmov v3.d[1], x9 // CTR block 4k+3
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
eor v4.16b, v4.16b, v11.16b // PRE 1
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x23, x23, x13 // AES block 4k+7 - round N low
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d10, v17.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
eor x22, x22, x14 // AES block 4k+6 - round N high
mov d8, v4.d[1] // GHASH block 4k - mid
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor x19, x19, x13 // AES block 4k+5 - round N low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
eor x21, x21, x13 // AES block 4k+6 - round N low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
movi v8.8b, #0xc2
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
fmov d5, x19 // AES block 4k+5 - mov low
ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
b.lt Lenc_main_loop_continue // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Lenc_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Lenc_main_loop_continue:
shl d8, d8, #56 // mod_constant
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
add w12, w12, #1 // CTR block 4k+3
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
add x0, x0, #64 // AES input_ptr update
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
rev w9, w12 // CTR block 4k+8
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor x6, x6, x13 // AES block 4k+4 - round N low
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
eor x7, x7, x14 // AES block 4k+4 - round N high
fmov d4, x6 // AES block 4k+4 - mov low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
eor x20, x20, x14 // AES block 4k+5 - round N high
eor x24, x24, x14 // AES block 4k+7 - round N high
add w12, w12, #1 // CTR block 4k+8
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
fmov d7, x23 // AES block 4k+7 - mov low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
fmov v5.d[1], x20 // AES block 4k+5 - mov high
fmov d6, x21 // AES block 4k+6 - mov low
cmp x0, x5 // LOOP CONTROL
fmov v6.d[1], x22 // AES block 4k+6 - mov high
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
rev w9, w12 // CTR block 4k+9
add w12, w12, #1 // CTR block 4k+9
eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
fmov d1, x10 // CTR block 4k+9
orr x9, x11, x9, lsl #32 // CTR block 4k+9
fmov v1.d[1], x9 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
rev w9, w12 // CTR block 4k+10
st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
orr x9, x11, x9, lsl #32 // CTR block 4k+10
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
fmov v7.d[1], x24 // AES block 4k+7 - mov high
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
fmov d2, x10 // CTR block 4k+10
st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
fmov v2.d[1], x9 // CTR block 4k+10
rev w9, w12 // CTR block 4k+11
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
orr x9, x11, x9, lsl #32 // CTR block 4k+11
eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
b.lt Lenc_main_loop
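// Prepretail: fewer than four full blocks remain. Run the AES rounds for the
// four outstanding counter blocks and fold the last complete batch of four
// ciphertext blocks into the GHASH state, but load no further plaintext; the
// finished keystream blocks v0-v3 are consumed by the tail below.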
Lenc_prepretail: // PREPRETAIL
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov d3, x10 // CTR block 4k+3
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
fmov v3.d[1], x9 // CTR block 4k+3
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
add w12, w12, #1 // CTR block 4k+3
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
pmull v4.1q, v9.1d, v8.1d
ext v9.16b, v9.16b, v9.16b, #8
eor v10.16b, v10.16b, v11.16b
b.lt Lenc_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
b.eq Lenc_finish_prepretail // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
Lenc_finish_prepretail:
eor v10.16b, v10.16b, v4.16b
eor v10.16b, v10.16b, v9.16b
pmull v4.1q, v10.1d, v8.1d
ext v10.16b, v10.16b, v10.16b, #8
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
eor v11.16b, v11.16b, v4.16b
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b
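// Tail: 1 to 4 blocks (at least one byte) remain. v8 carries the running tag
// so it is fed into the first GHASH block handled here, then zeroed
// ("suppress further partial tag feed in"). Keystream registers are shuffled
// (v3 <- v2 <- v1) so the final block always uses v3, and w12 is wound back
// for counter blocks that were generated but not consumed.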
Lenc_tail: // TAIL
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
	sub x5, x4, x0 // x5 (previously main_end_input_ptr) now holds the number of bytes left to process
ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
cmp x5, #48
fmov d4, x6 // AES block 4k+4 - mov low
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
b.gt Lenc_blocks_more_than_3
cmp x5, #32
mov v3.16b, v2.16b
movi v11.8b, #0
movi v9.8b, #0
sub w12, w12, #1
mov v2.16b, v1.16b
movi v10.8b, #0
b.gt Lenc_blocks_more_than_2
mov v3.16b, v1.16b
sub w12, w12, #1
cmp x5, #16
b.gt Lenc_blocks_more_than_1
sub w12, w12, #1
b Lenc_blocks_less_than_1
Lenc_blocks_more_than_3: // blocks left > 3
st1 { v5.16b}, [x2], #16 // AES final-3 block - store result
ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-3 block
eor x6, x6, x13 // AES final-2 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor x7, x7, x14 // AES final-2 block - round N high
mov d22, v4.d[1] // GHASH final-3 block - mid
fmov d5, x6 // AES final-2 block - mov low
fmov v5.d[1], x7 // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
mov d10, v17.d[1] // GHASH final-3 block - mid
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
Lenc_blocks_more_than_2: // blocks left > 2
st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-2 block
eor x6, x6, x13 // AES final-1 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
fmov d5, x6 // AES final-1 block - mov low
eor x7, x7, x14 // AES final-1 block - round N high
fmov v5.d[1], x7 // AES final-1 block - mov high
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
Lenc_blocks_more_than_1: // blocks left > 1
st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ldp x6, x7, [x0], #16 // AES final block - load input low & high
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
eor x6, x6, x13 // AES final block - round N low
mov d22, v4.d[1] // GHASH final-1 block - mid
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor x7, x7, x14 // AES final block - round N high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
fmov d5, x6 // AES final block - mov low
fmov v5.d[1], x7 // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
eor v5.16b, v5.16b, v3.16b // AES final block - result
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
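// Build a byte mask for the final, possibly partial block: x1 becomes
// 128 - (#input bits in the last block); shifting all-ones right by that
// amount plus the cmp/csel pair yields a 128-bit mask in v0 covering exactly
// the valid low-order bytes. The masked result is merged with the existing
// output bytes (bif) so bytes past the message end stay untouched, and the
// same masked value becomes the final GHASH input.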
Lenc_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored
mvn x14, xzr // rkN_h = 0xffffffffffffffff
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x6, x13, x14, lt
csel x7, x14, xzr, lt
fmov d0, x6 // ctr0b is mask for last block
fmov v0.d[1], x7
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
mov d8, v4.d[1] // GHASH final block - mid
rev w9, w12
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
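// Final GHASH reduction: the low/mid/high karatsuba products (v11/v10/v9)
// form a 256-bit value folded modulo the GHASH polynomial
// x^128 + x^7 + x^2 + x + 1; in this bit-reflected representation the folding
// multiplier is the constant 0xc2 shifted into the top byte (mod_constant).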
movi v8.8b, #0xc2
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
str w9, [x16, #12] // store the updated counter
st1 { v5.16b}, [x2] // store all 16B
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
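// Decryption kernel. Argument layout, inferred from the register usage in
// the prologue below: x0 = ciphertext in, x1 = length in bits, x2 = plaintext
// out, x3 = GHASH state Xi, x4 = counter block (kept in x16), x5 = AES key
// schedule (kept in x8; round count at offset #240), x6 = table of powers of
// H. The structure mirrors the encryption kernel above, except that GHASH is
// computed over the ciphertext inputs rather than over the produced output.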
.globl aes_gcm_dec_kernel
.def aes_gcm_dec_kernel
.type 32
.endef
.align 4
aes_gcm_dec_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
	add x19, x8, x17, lsl #4 // borrow input_l1 (x19) to point at the last round key (key + 16*rounds)
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
	mov x15, x5 // save byte_len; returned in x0 on exit
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
	orr w11, w11, w11 // writing w11 zero-extends: x11 keeps only IV bytes 8-11 (counter already extracted to x12)
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
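// Counter-block scheme used throughout: x10 holds the low 64 bits of the
// counter block, x11 the IV bytes 8-11, and w12 the 32-bit block counter in
// host (byte-reversed) order. Each block is materialised as
// rev w9,w12 / orr x9,x11,x9,lsl#32 / fmov dN,x10 / fmov vN.d[1],x9.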
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
	ld1 { v0.16b}, [x16] // special case: vector-load the initial counter block so the first AES block can start as early as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge Ldec_prepretail // do prepretail
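// Main decrypt loop. As in the encrypt path, four counter blocks are
// encrypted per iteration while the previous four blocks are folded into
// GHASH; here the GHASH inputs (v4-v7) are the loaded ciphertext blocks
// themselves, and the plaintext halves are moved to GPRs, XORed with the
// last round key (x13/x14) and stored with stp.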
Ldec_main_loop: // main loop start
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt Ldec_main_loop
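// Decrypt prepretail, analogous to the encrypt version: finish the AES
// rounds for the four outstanding counter blocks and fold the last full
// batch of ciphertext blocks into the GHASH accumulator, while storing the
// two still-pending plaintext results (blocks 4k+2 and 4k+3).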
Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
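// Decrypt tail, handling the final 1 to 4 blocks. Ciphertext is loaded one
// block at a time, GHASHed directly and decrypted; as in the encrypt tail,
// v8 feeds the accumulated tag into the first block only, keystream
// registers are shuffled so the last block uses v3, and w12 is wound back
// for counter blocks that were generated but not consumed.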
Ldec_tail: // TAIL
	sub x5, x4, x0 // x5 (previously main_end_input_ptr) now holds the number of bytes left to process
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt Ldec_blocks_more_than_3
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt Ldec_blocks_more_than_2
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt Ldec_blocks_more_than_1
sub w12, w12, #1
b Ldec_blocks_less_than_1
Ldec_blocks_more_than_3: // blocks left > 3
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
Ldec_blocks_more_than_2: // blocks left > 2
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
Ldec_blocks_more_than_1: // blocks left > 1
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
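// Partial final block, decrypt side: the same 128-bit mask is built in
// x9/x10, but the merge with bytes already at the output location happens in
// GPRs (bic clears the positions being written, orr splices in the decrypted
// bytes), and it is the masked ciphertext, not the plaintext, that enters
// the final GHASH block.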
Ldec_blocks_less_than_1: // blocks left <= 1
and x1, x1, #127 // bit_length %= 128
mvn x14, xzr // rkN_h = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
	ldp x4, x5, [x2] // load existing output bytes that must not be overwritten
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x9, x13, x14, lt
csel x10, x14, xzr, lt
fmov d0, x9 // ctr0b is mask for last block
and x6, x6, x9
mov v0.d[1], x10
bic x4, x4, x9 // mask out low existing bytes
rev w9, w12
bic x5, x5, x10 // mask out high existing bytes
orr x6, x6, x4
and x7, x7, x10
orr x7, x7, x5
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha256-armv4-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@ ====================================================================
@ SHA256 block procedure for ARMv4. May 2007.
@ Performance is ~2x better than gcc 3.4 generated code and in "absolute"
@ terms is ~2250 cycles per 64-byte block or ~35 cycles per byte
@ [on single-issue Xscale PXA250 core].
@ July 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
@ Cortex A8 core and ~20 cycles per processed byte.
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 16%
@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
@ September 2013.
@
@ Add NEON implementation. On Cortex A8 it was measured to process one
@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
@ code (meaning the latter performs sub-optimally; nothing was done
@ about it).
@ May 2014.
@
@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
#ifdef __KERNEL__
# define __ARM_ARCH __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those
@ instructions are manually-encoded. (See unsha256.)
.arch armv7-a
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.type K256,%object
.align 5
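@ K256 holds the 64 SHA-256 round constants: the first 32 bits of the
@ fractional parts of the cube roots of the first 64 primes (FIPS 180-4).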
K256:
.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size K256,.-K256
.word 0 @ terminator
.align 5
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,%function
sha256_block_data_order_nohw:
add r2,r1,r2,lsl#6 @ len to point at the end of inp
stmdb sp!,{r0,r1,r2,r4-r11,lr}
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
adr r14,K256
sub sp,sp,#16*4 @ alloca(X[16])
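@ Scalar register layout: r4-r11 carry the working variables a-h, r14
@ walks the K256 table, and the 16-word schedule X[] lives in the stack
@ frame allocated above; r1 is the input pointer and r2 the end pointer.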
.Loop:
# if __ARM_ARCH>=7
ldr r2,[r1],#4
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ magic
eor r12,r12,r12
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 0
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 0
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 0==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 0<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 1
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 1
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 1==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 1<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 2
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 2
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 2==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 2<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 3
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 3
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 3==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 3<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 4
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 4
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 4==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 4<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 5
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 5==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 5<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 6
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 6
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 6==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 6<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 7
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 7==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 7<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 8
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 8
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 8==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 8<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 9
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 9
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 9==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 9<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 10
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 10
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 10==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 10<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 11
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 11
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 11==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 11<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 12
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 12
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 12==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 12<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 13
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 13
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 13==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 13<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 14
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 14
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 14==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 14<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 15
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 15
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 15==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 15<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
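@ Rounds 16..63 extend the message schedule in place:
@   X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2])
@ with sigma0(x) = ror(x,7)^ror(x,18)^(x>>3) and
@ sigma1(x) = ror(x,17)^ror(x,19)^(x>>10), as the ror/lsr pairs below show.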
.Lrounds_16_xx:
@ ldr r2,[sp,#1*4] @ 16
@ ldr r1,[sp,#14*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#0*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#9*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 16==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 16<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#2*4] @ 17
@ ldr r1,[sp,#15*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#1*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#10*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 17==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 17<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#3*4] @ 18
@ ldr r1,[sp,#0*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#2*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#11*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 18==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 18<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#4*4] @ 19
@ ldr r1,[sp,#1*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#3*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#12*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 19==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 19<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#5*4] @ 20
@ ldr r1,[sp,#2*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#4*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#13*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 20==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 20<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#6*4] @ 21
@ ldr r1,[sp,#3*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#5*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#14*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 21==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 21<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#7*4] @ 22
@ ldr r1,[sp,#4*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#6*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#15*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 22==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 22<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#8*4] @ 23
@ ldr r1,[sp,#5*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#7*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#0*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 23==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 23<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#9*4] @ 24
@ ldr r1,[sp,#6*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#8*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#1*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 24==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 24<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#10*4] @ 25
@ ldr r1,[sp,#7*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#9*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#2*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 25==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 25<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#11*4] @ 26
@ ldr r1,[sp,#8*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#10*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#3*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 26==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 26<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#12*4] @ 27
@ ldr r1,[sp,#9*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#11*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#4*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 27==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 27<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#13*4] @ 28
@ ldr r1,[sp,#10*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#12*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#5*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 28==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 28<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#14*4] @ 29
@ ldr r1,[sp,#11*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#13*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#6*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 29==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 29<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#15*4] @ 30
@ ldr r1,[sp,#12*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#14*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#7*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 30==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 30<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#0*4] @ 31
@ ldr r1,[sp,#13*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#15*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#8*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 31==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 31<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
ite eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r3,[sp,#16*4] @ pull ctx
bne .Lrounds_16_xx
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r0,[r3,#0]
ldr r2,[r3,#4]
ldr r12,[r3,#8]
add r4,r4,r0
ldr r0,[r3,#12]
add r5,r5,r2
ldr r2,[r3,#16]
add r6,r6,r12
ldr r12,[r3,#20]
add r7,r7,r0
ldr r0,[r3,#24]
add r8,r8,r2
ldr r2,[r3,#28]
add r9,r9,r12
ldr r1,[sp,#17*4] @ pull inp
ldr r12,[sp,#18*4] @ pull inp+len
add r10,r10,r0
add r11,r11,r2
stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
cmp r1,r12
sub r14,r14,#256 @ rewind Ktbl
bne .Loop
add sp,sp,#19*4 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.LK256_shortcut_neon:
@ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode.
#if defined(__thumb2__)
.word K256-(.LK256_add_neon+4)
#else
.word K256-(.LK256_add_neon+8)
#endif
.globl sha256_block_data_order_neon
.hidden sha256_block_data_order_neon
.type sha256_block_data_order_neon,%function
.align 5
.skip 16
sha256_block_data_order_neon:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r11,sp,#16*4+16
@ K256 is just at the boundary of being easily referenced by an ADR from
@ this function. In Arm mode, when building with __ARM_ARCH=6, it does
@ not fit. By moving code around, we could make it fit, but this is too
@ fragile. For simplicity, just load the offset from
@ .LK256_shortcut_neon.
@
@ TODO(davidben): adrl would avoid a load, but clang-assembler does not
@ support it. We might be able to emulate it with a macro, but Android's
@ did not work when I tried it.
@ https://android.googlesource.com/platform/ndk/+/refs/heads/main/docs/ClangMigration.md#arm
ldr r14,.LK256_shortcut_neon
.LK256_add_neon:
add r14,pc,r14
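@ Worked through in Arm mode: pc reads as .LK256_add_neon+8 at the add
@ above, so r14 = (K256-(.LK256_add_neon+8)) + (.LK256_add_neon+8) = K256;
@ the Thumb word in .LK256_shortcut_neon uses +4 for the same cancellation.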
bic r11,r11,#15 @ align for 128-bit stores
mov r12,sp
mov sp,r11 @ alloca
add r2,r1,r2,lsl#6 @ len to point at the end of inp
vld1.8 {q0},[r1]!
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
vld1.32 {q8},[r14,:128]!
vld1.32 {q9},[r14,:128]!
vld1.32 {q10},[r14,:128]!
vld1.32 {q11},[r14,:128]!
vrev32.8 q0,q0 @ yes, even on
str r0,[sp,#64]
vrev32.8 q1,q1 @ big-endian
str r1,[sp,#68]
mov r1,sp
vrev32.8 q2,q2
str r2,[sp,#72]
vrev32.8 q3,q3
str r12,[sp,#76] @ save original sp
vadd.i32 q8,q8,q0
vadd.i32 q9,q9,q1
vst1.32 {q8},[r1,:128]!
vadd.i32 q10,q10,q2
vst1.32 {q9},[r1,:128]!
vadd.i32 q11,q11,q3
vst1.32 {q10},[r1,:128]!
vst1.32 {q11},[r1,:128]!
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
sub r1,r1,#64
ldr r2,[sp,#0]
eor r12,r12,r12
eor r3,r5,r6
b .L_00_48
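@ Each pass through .L_00_48 interleaves 16 scalar rounds with NEON message
@ expansion: q0-q3 hold the 16 schedule words, the vext/vshr/vsli chains
@ evaluate sigma0/sigma1 four lanes at a time, and the K[i]+X[i] sums are
@ staged on the stack for the scalar rounds to consume via ldr r2,[sp,#...].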
.align 4
.L_00_48:
vext.8 q8,q0,q1,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q2,q3,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q0,q0,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#4]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d7,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d7,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d7,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q0,q0,q9
add r10,r10,r2
ldr r2,[sp,#8]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d7,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d7,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d0,d0,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d0,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d0,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d0,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#12]
and r3,r3,r12
vshr.u32 d24,d0,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d0,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d1,d1,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q0
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q1,q2,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q3,q0,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q1,q1,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#20]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d1,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d1,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d1,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q1,q1,q9
add r6,r6,r2
ldr r2,[sp,#24]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d1,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d1,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d2,d2,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d2,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d2,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d2,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#28]
and r3,r3,r12
vshr.u32 d24,d2,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d2,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d3,d3,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q1
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
vext.8 q8,q2,q3,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q0,q1,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q2,q2,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#36]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d3,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d3,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d3,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q2,q2,q9
add r10,r10,r2
ldr r2,[sp,#40]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d3,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d3,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d4,d4,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d4,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d4,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d4,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#44]
and r3,r3,r12
vshr.u32 d24,d4,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d4,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d5,d5,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q2
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q3,q0,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q1,q2,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q3,q3,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#52]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d5,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d5,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d5,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q3,q3,q9
add r6,r6,r2
ldr r2,[sp,#56]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d5,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d5,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d6,d6,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d6,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d6,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d6,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#60]
and r3,r3,r12
vshr.u32 d24,d6,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d6,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d7,d7,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q3
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[r14]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
teq r2,#0 @ check for K256 terminator
ldr r2,[sp,#0]
sub r1,r1,#64
bne .L_00_48
ldr r1,[sp,#68]
ldr r0,[sp,#72]
sub r14,r14,#256 @ rewind r14
teq r1,r0
it eq
subeq r1,r1,#64 @ avoid SEGV
vld1.8 {q0},[r1]! @ load next input block
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
it ne
strne r1,[sp,#68]
mov r1,sp
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q0,q0
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q0
ldr r2,[sp,#4]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#8]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#12]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q1,q1
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q1
ldr r2,[sp,#20]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#24]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#28]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q2,q2
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q2
ldr r2,[sp,#36]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#40]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#44]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q3,q3
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q3
ldr r2,[sp,#52]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#56]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#60]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#64]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
ldr r0,[r2,#0]
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r12,[r2,#4]
ldr r3,[r2,#8]
ldr r1,[r2,#12]
add r4,r4,r0 @ accumulate
ldr r0,[r2,#16]
add r5,r5,r12
ldr r12,[r2,#20]
add r6,r6,r3
ldr r3,[r2,#24]
add r7,r7,r1
ldr r1,[r2,#28]
add r8,r8,r0
str r4,[r2],#4
add r9,r9,r12
str r5,[r2],#4
add r10,r10,r3
str r6,[r2],#4
add r11,r11,r1
str r7,[r2],#4
stmia r2,{r8,r9,r10,r11}
ittte ne
movne r1,sp
ldrne r2,[sp,#0]
eorne r12,r12,r12
ldreq sp,[sp,#76] @ restore original sp
itt ne
eorne r3,r5,r6
bne .L_00_48
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
#endif
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
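@ The .byte string above is the NUL-terminated banner "SHA256 block
@ transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>".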
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
mktmansour/MKT-KSA-Geolocation-Security | 5,546 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-x86-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,@function
.align 16
gcm_init_clmul:
.L_gcm_init_clmul_begin:
movl 4(%esp),%edx
movl 8(%esp),%eax
call .L000pic
.L000pic:
popl %ecx
leal .Lbswap-.L000pic(%ecx),%ecx
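// call/pop is the standard position-independent idiom for reading EIP on
// 32-bit x86; %ecx then serves as the base for PC-relative access to the
// .Lbswap constants at the end of this file.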
movdqu (%eax),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand 16(%ecx),%xmm5
pxor %xmm5,%xmm2
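// A reading of the block above (an assumption, not verified against the
// generating script): it doubles H in GF(2^128), a left shift by one with
// cross-lane carry, and conditionally xors in the reduction constant from
// .Lbswap+16 when the top bit was set, producing the pre-twisted hash key
// the multiply below expects.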
movdqa %xmm2,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
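// The .byte runs below are hand-encoded SSE instructions, kept as raw
// bytes for old assemblers: 102,15,58,68,... is pclmulqdq (66 0F 3A 44
// /r ib), 102,15,56,0,... is pshufb (66 0F 38 00 /r), and
// 102,15,58,15,... is palignr (66 0F 3A 0F /r ib).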
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
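// From here the 256-bit product in xmm1:xmm0 is reduced modulo the GHASH
// polynomial purely with shifts and xors (the psllq 5/1/57 and psrlq
// 1/5/1 ladders), avoiding a second carry-less multiply by the constant.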
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,(%edx)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%edx)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%edx)
ret
.size gcm_init_clmul,.-.L_gcm_init_clmul_begin
.globl gcm_ghash_clmul
.hidden gcm_ghash_clmul
.type gcm_ghash_clmul,@function
.align 16
gcm_ghash_clmul:
.L_gcm_ghash_clmul_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 28(%esp),%esi
movl 32(%esp),%ebx
call .L001pic
.L001pic:
popl %ecx
leal .Lbswap-.L001pic(%ecx),%ecx
movdqu (%eax),%xmm0
movdqa (%ecx),%xmm5
movdqu (%edx),%xmm2
.byte 102,15,56,0,197
subl $16,%ebx
jz .L002odd_tail
movdqu (%esi),%xmm3
movdqu 16(%esi),%xmm6
.byte 102,15,56,0,221
.byte 102,15,56,0,245
movdqu 32(%edx),%xmm5
pxor %xmm3,%xmm0
pshufd $78,%xmm6,%xmm3
movdqa %xmm6,%xmm7
pxor %xmm6,%xmm3
leal 32(%esi),%esi
.byte 102,15,58,68,242,0
.byte 102,15,58,68,250,17
.byte 102,15,58,68,221,0
movups 16(%edx),%xmm2
nop
subl $32,%ebx
jbe .L003even_tail
jmp .L004mod_loop
.align 32
.L004mod_loop:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
nop
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movups (%edx),%xmm2
xorps %xmm6,%xmm0
movdqa (%ecx),%xmm5
xorps %xmm7,%xmm1
movdqu (%esi),%xmm7
pxor %xmm0,%xmm3
movdqu 16(%esi),%xmm6
pxor %xmm1,%xmm3
.byte 102,15,56,0,253
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
.byte 102,15,56,0,245
pxor %xmm7,%xmm1
movdqa %xmm6,%xmm7
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
.byte 102,15,58,68,242,0
movups 32(%edx),%xmm5
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
pshufd $78,%xmm7,%xmm3
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm7,%xmm3
pxor %xmm4,%xmm1
.byte 102,15,58,68,250,17
movups 16(%edx),%xmm2
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.byte 102,15,58,68,221,0
leal 32(%esi),%esi
subl $32,%ebx
ja .L004mod_loop
.L003even_tail:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movdqa (%ecx),%xmm5
xorps %xmm6,%xmm0
xorps %xmm7,%xmm1
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testl %ebx,%ebx
jnz .L005done
movups (%edx),%xmm2
.L002odd_tail:
movdqu (%esi),%xmm3
.byte 102,15,56,0,221
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.L005done:
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin
.align 64
.Lbswap:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194
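// Editor's note: the second 16-byte row above (ending in 194 = 0xc2) is the
// reduction-polynomial mask fetched via `pand 16(%ecx)` in gcm_init_clmul,
// 16 bytes past the .Lbswap label that %ecx points at.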
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67
.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112
.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62
.byte 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghashv8-armx-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_clmul
.def gcm_init_clmul
.type 32
.endef
.align 4
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
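// Editor's note (illustrative, not part of the generated output): the
// "Karatsuba pre/post-processing" above is the standard trick of forming a
// 256-bit carry-less product from three pmulls instead of four:
//   H*X = (H.hi*X.hi)<<128 ^ (H.lo*X.lo)
//       ^ ((H.hi^H.lo)*(X.hi^X.lo) ^ H.hi*X.hi ^ H.lo*X.lo)<<64
// A minimal Rust sketch over 32-bit halves; clmul32 is a hypothetical
// software stand-in for the pmull instruction:
//
//   fn clmul32(a: u32, b: u32) -> u64 {
//       let mut r = 0u64;
//       for i in 0..32 {
//           if (b >> i) & 1 == 1 { r ^= (a as u64) << i; }
//       }
//       r
//   }
//   // 64x64 -> 128-bit carry-less product via three 32x32 clmuls
//   fn clmul64(a: u64, b: u64) -> (u64, u64) {
//       let (al, ah) = (a as u32, (a >> 32) as u32);
//       let (bl, bh) = (b as u32, (b >> 32) as u32);
//       let (lo, hi) = (clmul32(al, bl), clmul32(ah, bh));
//       let mid = clmul32(al ^ ah, bl ^ bh) ^ lo ^ hi;
//       (lo ^ (mid << 32), hi ^ (mid >> 32))  // (low word, high word)
//   }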
.globl gcm_gmult_clmul
.def gcm_gmult_clmul
.type 32
.endef
.align 4
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/armv8-mont-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.text
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,%function
.align 5
bn_mul_mont_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) On the removal of the first multiplication and addition
// instructions: the outcome of the first addition is
// guaranteed to be zero, which leaves two computationally
// significant outcomes: it either carries or it does not.
// So when does it carry, and is there an alternative way to
// deduce it? If you follow the operations, the condition for
// a carry turns out to be quite simple: x6 being non-zero.
// The carry can therefore be calculated by adding -1 to x6,
// which is what the next instruction does.
subs xzr,x6,#1 // (*)
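// Editor's note (illustrative): why `subs xzr,x6,#1` recovers the carry of
// the discarded `adds x12,x12,x6`. Since n0 = -np[0]^-1 mod 2^64 (the
// standard Montgomery constant) and m1 = x6*n0 mod 2^64, we have
// lo(np[0]*m1) = (2^64 - x6) mod 2^64, so the sum wraps to exactly zero and
// carries precisely when x6 != 0 -- the same predicate `subs` leaves in the
// C flag. A Rust check of the identity (neg_inv is a hypothetical helper,
// not from this file):
//
//   fn neg_inv(np0: u64) -> u64 {              // np0 must be odd
//       let mut inv: u64 = 1;
//       for _ in 0..6 {                        // Newton-Hensel lifting
//           inv = inv.wrapping_mul(2u64.wrapping_sub(np0.wrapping_mul(inv)));
//       }
//       inv.wrapping_neg()
//   }
//   fn check(np0: u64, t0: u64) {
//       let m1 = t0.wrapping_mul(neg_inv(np0));
//       let (sum, carry) = np0.wrapping_mul(m1).overflowing_add(t0);
//       assert_eq!(sum, 0);                    // the "discarded" result
//       assert_eq!(carry, t0 != 0);            // what the subs computes
//   }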
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,.L1st_skip
.L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,.L1st
.L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
.Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,.Linner_skip
.Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,.Linner
.Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,.Louter
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
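// Editor's note (illustrative): the same compare-by-subtract in Rust, for
// little-endian limb arrays of equal length (a sketch; the assembly below
// additionally folds the top overflow bit x19 into the borrow decision):
//
//   fn cond_reduce(t: &mut [u64], n: &[u64]) {
//       let mut diff = vec![0u64; t.len()];
//       let mut borrow = false;
//       for i in 0..t.len() {                  // diff = t - n
//           let (d, b1) = t[i].overflowing_sub(n[i]);
//           let (d, b2) = d.overflowing_sub(borrow as u64);
//           diff[i] = d;
//           borrow = b1 | b2;
//       }
//       let keep = u64::from(borrow).wrapping_sub(1); // all-ones iff t >= n
//       for i in 0..t.len() {
//           t[i] = (diff[i] & keep) | (t[i] & !keep); // branch-free select
//       }
//   }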
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
.Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,.Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
.Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,.Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
.globl bn_sqr8x_mont
.hidden bn_sqr8x_mont
.type bn_sqr8x_mont,%function
.align 5
bn_sqr8x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b .Lsqr8x_zero_start
.Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
.Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,.Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
.Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewinded ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,.Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
.Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,.Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq .Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b .Lsqr8x_mul
.align 4
.Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,.Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b .Lsqr8x_outer_loop
.align 4
.Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
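// Editor's note (illustrative): the identity used here is
//   (sum_i a[i]*B^i)^2 = 2*(sum_{i<j} a[i]*a[j]*B^(i+j)) + sum_i a[i]^2*B^(2i)
// e.g. in base 10: 23^2 = 3^2 + 2*(2*3)*10 + 2^2*100 = 9 + 120 + 400 = 529.
// The loop above produced the cross products; the shift-and-add pass below
// doubles them (each extr by #63 is a 1-bit left shift across limbs) and
// folds in the squared digits a[i]^2.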
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
.Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,.Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
.Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,.Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,.Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
.Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,.Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewinded np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,.Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b .Lsqr8x_tail
.align 4
.Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne .Lsqr8x_reduction
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
.Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,.Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
.Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,.Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b .Lsqr8x_done
.align 4
.Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-x26,x28 hold result, x6-x13 hold modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-x13 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
.Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.size bn_sqr8x_mont,.-bn_sqr8x_mont
.globl bn_mul4x_mont
.hidden bn_mul4x_mont
.type bn_mul4x_mont,%function
.align 5
bn_mul4x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
.Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,.Loop_mul4x_1st_reduction
cbz x10,.Lmul4x4_post_condition
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,.Loop_mul4x_1st_tail
sub x11,x27,x5 // rewinded x1
cbz x10,.Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b .Loop_mul4x_1st_tail
.align 5
.Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
.Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0)
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,.Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
.Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,.Loop_mul4x_tail
sub x11,x3,x5 // rewinded np?
adc x0,x0,xzr
cbz x10,.Loop_mul4x_break
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b .Loop_mul4x_tail
.align 4
.Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq .Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b .Loop_mul4x_reduction
.align 4
.Lmul4x_post:
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
.Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,.Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
.Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,.Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b .Lmul4x_done
.align 4
.Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-x22,x0 hold result, x14-x17 hold modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-x9 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
.Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.size bn_mul4x_mont,.-bn_mul4x_mont
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha20_poly1305_armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.section .rodata
.align 7
.Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.Linc:
.long 1,2,3,4
.Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
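// Editor's note: the two quadwords above are the RFC 8439 Poly1305 "clamp";
// r is ANDed with them (r &= 0x0ffffffc0ffffffc0ffffffc0fffffff) so that the
// multiply-and-reduce steps below can defer carry propagation.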
.text
.type .Lpoly_hash_ad_internal,%function
.align 6
.Lpoly_hash_ad_internal:
.cfi_startproc
cbnz x4, .Lpoly_hash_intro
ret
.Lpoly_hash_intro:
cmp x4, #16
b.lt .Lpoly_hash_ad_tail
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b .Lpoly_hash_ad_internal
.Lpoly_hash_ad_tail:
cbz x4, .Lpoly_hash_ad_ret
eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
sub x4, x4, #1
.Lpoly_hash_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, x4]
mov v20.b[0], w11
subs x4, x4, #1
b.ge .Lpoly_hash_tail_16_compose
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lpoly_hash_ad_ret:
ret
.cfi_endproc
.size .Lpoly_hash_ad_internal, .-.Lpoly_hash_ad_internal
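// Editor's note (illustrative): the and/extr/lsr sequence in the hash blocks
// above is the standard Poly1305 partial reduction: since 2^130 = 5 modulo
// 2^130 - 5, product bits above bit 129 are folded back in as hi*5 = hi*4 + hi.
// A tiny Rust model of one folding step, scaled down to the modulus
// 2^6 - 5 = 59:
//
//   fn fold(x: u32) -> u32 {              // preserves x mod 59
//       (x & 63) + 5 * (x >> 6)           // low bits + 5 * high bits
//   }
//   assert_eq!(fold(200) % 59, 200 % 59); // 8 + 5*3 = 23 = 200 mod 59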
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
.globl chacha20_poly1305_seal
.hidden chacha20_poly1305_seal
.type chacha20_poly1305_seal,%function
.align 6
chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le .Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
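// Editor's note (illustrative): in the "vertical" layout each register below
// holds the same state word of four parallel blocks, one block per 32-bit
// lane, so one NEON op advances all four blocks at once. The scalar
// quarter-round these vector ops implement (rev32 = rot16, ushr+sli = rot12
// and rot7, tbl with .Lrol8 = rot8) is, per RFC 8439:
//
//   fn quarter_round(a: &mut u32, b: &mut u32, c: &mut u32, d: &mut u32) {
//       *a = a.wrapping_add(*b); *d ^= *a; *d = d.rotate_left(16);
//       *c = c.wrapping_add(*d); *b ^= *c; *b = b.rotate_left(12);
//       *a = a.wrapping_add(*b); *d ^= *a; *d = d.rotate_left(8);
//       *c = c.wrapping_add(*d); *b ^= *c; *b = b.rotate_left(7);
//   }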
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
.Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi .Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
bl .Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le .Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, so we hash one block for each of the first 4 rounds
mov x7, #6 // and two blocks for each of the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256 bytes
.Lseal_main_loop:
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
sub x5, x5, #32
.align 5
.Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge .Lseal_main_loop_rounds
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt .Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le .Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b .Lseal_main_loop
.Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
cmp x2, #64
b.lt .Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b .Lseal_tail
.Lseal_tail_64:
ldp x3, x4, [x5, #48] // extra_in ptr (x3) and extra_in len (x4)
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt .Lseal_tail_16
// Each iteration encrypts and authenticates one 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b .Lseal_tail_64
.Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
cbz x2, .Lseal_hash_extra
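// Technique for the partial block: v20 is composed one byte at a time by
// rotating it with ext and inserting via ldrb/mov, walking the input
// backwards; in lockstep, a 0xFF byte is shifted into v21 for every real
// plaintext byte. ANDing the keystream with v21 before the XOR means only
// the genuine plaintext bytes get encrypted, so any extra_in bytes merged
// into the top of v20 are authenticated as-is rather than scrambled.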
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, .Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
.Lseal_tail16_compose_extra_in:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt .Lseal_tail16_compose_extra_in
add x3, x3, x12
.Lseal_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt .Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
.Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt .Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lseal_hash_extra:
cbz x4, .Lseal_finalize
.Lseal_hash_extra_loop:
cmp x4, #16
b.lt .Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b .Lseal_hash_extra_loop
.Lseal_hash_extra_tail:
cbz x4, .Lseal_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext
add x3, x3, x4
.Lseal_hash_extra_load:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt .Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lseal_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
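// Final reduction: p = 2^130 - 5, so subtracting p is adding 5 to the low
// limb (subs ..., #-5) while subtracting 0xffff...ffff (x12 = -1) and 3
// from the middle and top limbs; the csel chain keeps h - p only when the
// subtraction did not borrow (h >= p). Hedged C sketch of the idea:
//
//   // h = (h >= p) ? h - p : h;   // now h is the exact value mod p
//
// The s half of the one-time key (stashed in v27) is then added and the
// low 128 bits are written out as the tag.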
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
.Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
.Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi .Lseal_128_rounds
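// Each .Lseal_128_rounds pass is one ChaCha20 double round (a column round,
// then a diagonal round after the ext-based lane rotations) over three
// blocks at once. The rotations use the usual NEON idioms: rotate-left 16
// via rev32 on 16-bit lanes, rotate-left 8 via tbl with the ROL8 table in
// v26, and rotate-left 12/7 via ushr+sli pairs. Hedged C sketch of one
// scalar quarter round for reference (illustrative only):
//
//   // #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
//   // a += b; d ^= a; d = ROTL32(d, 16);
//   // c += d; b ^= c; b = ROTL32(b, 12);
//   // a += b; d ^= a; d = ROTL32(d, 8);
//   // c += d; b ^= c; b = ROTL32(b, 7);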
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl .Lpoly_hash_ad_internal
b .Lseal_tail
.cfi_endproc
.size chacha20_poly1305_seal,.-chacha20_poly1305_seal
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
.globl chacha20_poly1305_open
.hidden chacha20_poly1305_open
.type chacha20_poly1305_open,%function
.align 6
chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2
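// x15 = 1 doubles as the 2^128 padding bit added to every full Poly1305
// block, [x10:x9:x8] is the zeroed 130-bit accumulator, and v31 parks the
// aad and ciphertext lengths so the mandatory length block (aad_len ||
// msg_len as two little-endian u64s, per RFC 8439) can be absorbed at
// .Lopen_finalize.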
cmp x2, #128
b.le .Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
mov x6, #10
.align 5
.Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi .Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
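// v27 held the CLAMP mask here: r (the first 16 keystream bytes of block 0)
// is ANDed with 0x0ffffffc0ffffffc0ffffffc0fffffff as Poly1305 requires
// before the move to GPRs, and v27 is then reused to stash s (the next 16
// bytes), which is added exactly once, to the final tag.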
bl .Lpoly_hash_ad_internal
.Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hashes 320 bytes and prepares the stream for 320 bytes
.Lopen_main_loop:
cmp x2, #192
b.lt .Lopen_tail
adrp x11, .Lchacha20_consts
add x11, x11, :lo12:.Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
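// The eor/not/sub/ext dance above materializes v25 + 1 and isolates its top
// element into lane 0 of an otherwise-zero vector, so the add bumps only the
// 32-bit block-counter lane of the fifth state row v19 and leaves the nonce
// lanes untouched.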
lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12
sub x4, x4, #10
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
cbz x7, .Lopen_main_loop_rounds_short
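// Scheduling: the loop absorbs Poly1305 blocks at two points per double
// round (an ldp+multiply+reduce at the top and another in the middle of the
// body); x7 counts iterations that take both, x6 the trailing iterations
// that skip the leading one via .Lopen_main_loop_rounds_short, so hashing of
// the ciphertext keeps pace with keystream generation without reading past
// the input. Interleaving the scalar Poly1305 work between the NEON
// instructions lets the integer and vector pipes run concurrently.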
.align 5
.Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt .Lopen_main_loop_rounds
subs x6, x6, #1
b.ge .Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt .Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt .Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b .Lopen_main_loop
.Lopen_tail:
cbz x2, .Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le .Lopen_tail_64
cmp x2, #128
b.le .Lopen_tail_128
.Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, .Lopen_tail_192_rounds_no_hash
.Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
.Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt .Lopen_tail_192_rounds
subs x6, x6, #1
b.ge .Lopen_tail_192_rounds_no_hash
// We hashed 160 bytes at most; there may still be up to 32 bytes left
.Lopen_tail_192_hash:
cbz x4, .Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b .Lopen_tail_192_hash
.Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b .Lopen_tail_64_store
.Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
.Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt .Lopen_tail_128_rounds
cbz x4, .Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b .Lopen_tail_128_rounds
.Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b .Lopen_tail_64_store
.Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
.Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt .Lopen_tail_64_rounds
cbz x4, .Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b .Lopen_tail_64_rounds
.Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
.Lopen_tail_64_store:
cmp x2, #16
b.lt .Lopen_tail_16
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b .Lopen_tail_64_store
.Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
cbz x2, .Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
add x7, x1, x2
mov x6, x2
.Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt .Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
.Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt .Lopen_tail_16_store
.Lopen_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
.Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
.Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi .Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl .Lpoly_hash_ad_internal
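// Unlike the seal path, open must authenticate the ciphertext exactly as
// received: each 64-byte chunk below is hashed with Poly1305 first and only
// then XORed with the keystream to recover the plaintext.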
.Lopen_128_store:
cmp x2, #64
b.lt .Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
.Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
.Lopen_128_hash_64:
cbz x4, .Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b .Lopen_128_hash_64
.cfi_endproc
.size chacha20_poly1305_open,.-chacha20_poly1305_open
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
mktmansour/MKT-KSA-Geolocation-Security
| 15,121
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesni-x86-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
.hidden _aesni_encrypt2
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L000enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L000enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.size _aesni_encrypt2,.-_aesni_encrypt2
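// The .byte 102,15,56,220,xx / 102,15,56,221,xx runs above are hand-encoded
// AES-NI instructions (aesenc / aesenclast on xmm registers), emitted as raw
// bytes so the file assembles even on toolchains without AES-NI mnemonics.
// _aesni_encrypt2 pushes two blocks through the key schedule in lockstep to
// hide aesenc latency; roughly, in C intrinsics (hedged, illustrative only):
//
//   // b0 = _mm_xor_si128(b0, rk[0]); b1 = _mm_xor_si128(b1, rk[0]);
//   // for (int i = 1; i < rounds; i++) {
//   //     b0 = _mm_aesenc_si128(b0, rk[i]);
//   //     b1 = _mm_aesenc_si128(b1, rk[i]);
//   // }
//   // b0 = _mm_aesenclast_si128(b0, rk[rounds]);
//   // b1 = _mm_aesenclast_si128(b1, rk[rounds]);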
.hidden _aesni_encrypt3
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L001enc3_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%edx,%ecx,1),%xmm0
jnz .L001enc3_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.size _aesni_encrypt3,.-_aesni_encrypt3
.hidden _aesni_encrypt4
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 15,31,64,0
addl $16,%ecx
.L002enc4_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%edx,%ecx,1),%xmm0
jnz .L002enc4_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.size _aesni_encrypt4,.-_aesni_encrypt4
.hidden _aesni_encrypt6
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp .L003_aesni_encrypt6_inner
.align 16
.L004enc6_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.L003_aesni_encrypt6_inner:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.L_aesni_encrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%edx,%ecx,1),%xmm0
jnz .L004enc6_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.size _aesni_encrypt6,.-_aesni_encrypt6
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.L_aes_hw_ctr32_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L005pic_for_function_hit
.L005pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+0-.L005pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $88,%esp
andl $-16,%esp
movl %ebp,80(%esp)
cmpl $1,%eax
je .L006ctr32_one_shortcut
movdqu (%ebx),%xmm7
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
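// The four constants just stored (0x0C0D0E0F, 0x08090A0B, 0x04050607,
// 0x00010203) form a pshufb byte-reversal mask at (%esp): the counter is
// incremented in little-endian form and byte-swapped through this mask (the
// .byte 102,15,56,0,... lines encode pshufb) into the big-endian layout that
// AES-CTR counter blocks use.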
movl $6,%ecx
xorl %ebp,%ebp
movl %ecx,16(%esp)
movl %ecx,20(%esp)
movl %ecx,24(%esp)
movl %ebp,28(%esp)
.byte 102,15,58,22,251,3
.byte 102,15,58,34,253,3
movl 240(%edx),%ecx
bswap %ebx
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movdqa (%esp),%xmm2
.byte 102,15,58,34,195,0
leal 3(%ebx),%ebp
.byte 102,15,58,34,205,0
incl %ebx
.byte 102,15,58,34,195,1
incl %ebp
.byte 102,15,58,34,205,1
incl %ebx
.byte 102,15,58,34,195,2
incl %ebp
.byte 102,15,58,34,205,2
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
movdqu (%edx),%xmm6
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
pshufd $192,%xmm0,%xmm2
pshufd $128,%xmm0,%xmm3
cmpl $6,%eax
jb .L007ctr32_tail
pxor %xmm6,%xmm7
shll $4,%ecx
movl $16,%ebx
movdqa %xmm7,32(%esp)
movl %edx,%ebp
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
subl $6,%eax
jmp .L008ctr32_loop6
.align 16
.L008ctr32_loop6:
pshufd $64,%xmm0,%xmm4
movdqa 32(%esp),%xmm0
pshufd $192,%xmm1,%xmm5
pxor %xmm0,%xmm2
pshufd $128,%xmm1,%xmm6
pxor %xmm0,%xmm3
pshufd $64,%xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
.byte 102,15,56,220,209
pxor %xmm0,%xmm6
pxor %xmm0,%xmm7
.byte 102,15,56,220,217
movups 32(%ebp),%xmm0
movl %ebx,%ecx
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups %xmm2,(%edi)
movdqa 16(%esp),%xmm0
xorps %xmm1,%xmm4
movdqa 64(%esp),%xmm1
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
paddd %xmm0,%xmm1
paddd 48(%esp),%xmm0
movdqa (%esp),%xmm2
movups 48(%esi),%xmm3
movups 64(%esi),%xmm4
xorps %xmm3,%xmm5
movups 80(%esi),%xmm3
leal 96(%esi),%esi
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
xorps %xmm4,%xmm6
movups %xmm5,48(%edi)
xorps %xmm3,%xmm7
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
movups %xmm6,64(%edi)
pshufd $192,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
pshufd $128,%xmm0,%xmm3
subl $6,%eax
jnc .L008ctr32_loop6
addl $6,%eax
jz .L009ctr32_ret
movdqu (%ebp),%xmm7
movl %ebp,%edx
pxor 32(%esp),%xmm7
movl 240(%ebp),%ecx
.L007ctr32_tail:
por %xmm7,%xmm2
cmpl $2,%eax
jb .L010ctr32_one
pshufd $64,%xmm0,%xmm4
por %xmm7,%xmm3
je .L011ctr32_two
pshufd $192,%xmm1,%xmm5
por %xmm7,%xmm4
cmpl $4,%eax
jb .L012ctr32_three
pshufd $128,%xmm1,%xmm6
por %xmm7,%xmm5
je .L013ctr32_four
por %xmm7,%xmm6
call _aesni_encrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups 48(%esi),%xmm0
xorps %xmm1,%xmm4
movups 64(%esi),%xmm1
xorps %xmm0,%xmm5
movups %xmm2,(%edi)
xorps %xmm1,%xmm6
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L009ctr32_ret
.align 16
.L006ctr32_one_shortcut:
movups (%ebx),%xmm2
movl 240(%edx),%ecx
.L010ctr32_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L014enc1_loop_1:
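// Single-block AES: aesenc over the middle round keys, aesenclast last.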
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L014enc1_loop_1
.byte 102,15,56,221,209
movups (%esi),%xmm6
xorps %xmm2,%xmm6
movups %xmm6,(%edi)
jmp .L009ctr32_ret
.align 16
.L011ctr32_two:
call _aesni_encrypt2
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L009ctr32_ret
.align 16
.L012ctr32_three:
call _aesni_encrypt3
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
movups 32(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm7,%xmm4
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L009ctr32_ret
.align 16
.L013ctr32_four:
call _aesni_encrypt4
movups (%esi),%xmm6
movups 16(%esi),%xmm7
movups 32(%esi),%xmm1
xorps %xmm6,%xmm2
movups 48(%esi),%xmm0
xorps %xmm7,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
.L009ctr32_ret:
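// Clear the XMM registers and the counter scratch slots so no key or
// counter material survives the call.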
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movl 80(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin
.globl aes_hw_set_encrypt_key_base
.hidden aes_hw_set_encrypt_key_base
.type aes_hw_set_encrypt_key_base,@function
.align 16
aes_hw_set_encrypt_key_base:
.L_aes_hw_set_encrypt_key_base_begin:
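// Key expansion via aeskeygenassist. Arguments: 4(%esp) user key,
// 8(%esp) key bits (128 or 256 only), 12(%esp) output AES_KEY.
// Returns 0 on success, -2 on an unsupported key size.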
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L015pic_for_function_hit
.L015pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+3-.L015pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
pushl %ebx
call .L016pic
.L016pic:
popl %ebx
leal .Lkey_const-.L016pic(%ebx),%ebx
movups (%eax),%xmm0
xorps %xmm4,%xmm4
leal 16(%edx),%edx
cmpl $256,%ecx
je .L01714rounds
cmpl $128,%ecx
jne .L018bad_keybits
.align 16
.L01910rounds:
movl $9,%ecx
movups %xmm0,-16(%edx)
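// 102,15,58,223,200,N encodes aeskeygenassist $N,%xmm0,%xmm1; N steps
// through the AES round constants 1,2,4,...,0x80,0x1b,0x36.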
.byte 102,15,58,223,200,1
call .L020key_128_cold
.byte 102,15,58,223,200,2
call .L021key_128
.byte 102,15,58,223,200,4
call .L021key_128
.byte 102,15,58,223,200,8
call .L021key_128
.byte 102,15,58,223,200,16
call .L021key_128
.byte 102,15,58,223,200,32
call .L021key_128
.byte 102,15,58,223,200,64
call .L021key_128
.byte 102,15,58,223,200,128
call .L021key_128
.byte 102,15,58,223,200,27
call .L021key_128
.byte 102,15,58,223,200,54
call .L021key_128
movups %xmm0,(%edx)
movl %ecx,80(%edx)
jmp .L022good_key
.align 16
.L021key_128:
movups %xmm0,(%edx)
leal 16(%edx),%edx
.L020key_128_cold:
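// Fold one 128-bit round key: the shufps/xorps pairs accumulate the
// running XOR of the previous key's words, then the RotWord+SubWord
// result broadcast from %xmm1 (shufps $255) is mixed in.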
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L01714rounds:
movups 16(%eax),%xmm2
leal 16(%edx),%edx
movl $13,%ecx
movups %xmm0,-32(%edx)
movups %xmm2,-16(%edx)
.byte 102,15,58,223,202,1
call .L023key_256a_cold
.byte 102,15,58,223,200,1
call .L024key_256b
.byte 102,15,58,223,202,2
call .L025key_256a
.byte 102,15,58,223,200,2
call .L024key_256b
.byte 102,15,58,223,202,4
call .L025key_256a
.byte 102,15,58,223,200,4
call .L024key_256b
.byte 102,15,58,223,202,8
call .L025key_256a
.byte 102,15,58,223,200,8
call .L024key_256b
.byte 102,15,58,223,202,16
call .L025key_256a
.byte 102,15,58,223,200,16
call .L024key_256b
.byte 102,15,58,223,202,32
call .L025key_256a
.byte 102,15,58,223,200,32
call .L024key_256b
.byte 102,15,58,223,202,64
call .L025key_256a
movups %xmm0,(%edx)
movl %ecx,16(%edx)
xorl %eax,%eax
jmp .L022good_key
.align 16
.L025key_256a:
movups %xmm2,(%edx)
leal 16(%edx),%edx
.L023key_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L024key_256b:
movups %xmm0,(%edx)
leal 16(%edx),%edx
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.L022good_key:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
ret
.align 4
.L018bad_keybits:
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
ret
.size aes_hw_set_encrypt_key_base,.-.L_aes_hw_set_encrypt_key_base_begin
.globl aes_hw_set_encrypt_key_alt
.hidden aes_hw_set_encrypt_key_alt
.type aes_hw_set_encrypt_key_alt,@function
.align 16
aes_hw_set_encrypt_key_alt:
.L_aes_hw_set_encrypt_key_alt_begin:
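// Alternative key schedule: derives round keys with pshufb and
// aesenclast plus the constants at .Lkey_const rather than
// aeskeygenassist. Same arguments and return values as the base version.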
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L026pic_for_function_hit
.L026pic_for_function_hit:
popl %ebx
leal BORINGSSL_function_hit+3-.L026pic_for_function_hit(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
pushl %ebx
call .L027pic
.L027pic:
popl %ebx
leal .Lkey_const-.L027pic(%ebx),%ebx
movups (%eax),%xmm0
xorps %xmm4,%xmm4
leal 16(%edx),%edx
cmpl $256,%ecx
je .L02814rounds_alt
cmpl $128,%ecx
jne .L029bad_keybits
.align 16
.L03010rounds_alt:
movdqa (%ebx),%xmm5
movl $8,%ecx
movdqa 32(%ebx),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,-16(%edx)
.L031loop_key128:
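// Per round: pshufb (mask in %xmm5) positions the last key word,
// aesenclast applies SubBytes and XORs in the round constant held in
// %xmm4 (doubled each round by pslld $1), and the pslldq/pxor chain
// forms the running XOR of the previous round key's words.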
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leal 16(%edx),%edx
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%edx)
movdqa %xmm0,%xmm2
decl %ecx
jnz .L031loop_key128
movdqa 48(%ebx),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%edx)
movl $9,%ecx
movl %ecx,96(%edx)
jmp .L032good_key
.align 16
.L02814rounds_alt:
movups 16(%eax),%xmm2
leal 16(%edx),%edx
movdqa (%ebx),%xmm5
movdqa 32(%ebx),%xmm4
movl $7,%ecx
movdqu %xmm0,-32(%edx)
movdqa %xmm2,%xmm1
movdqu %xmm2,-16(%edx)
.L033loop_key256:
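// AES-256 schedule: each pass emits two keys; the first uses
// RotWord+SubWord plus the round constant in %xmm4, the second (via
// pshufd $255 and aesenclast with a zeroed %xmm3) applies SubWord only.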
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
decl %ecx
jz .L034done_key256
pshufd $255,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%edx)
leal 32(%edx),%edx
movdqa %xmm2,%xmm1
jmp .L033loop_key256
.L034done_key256:
movl $13,%ecx
movl %ecx,16(%edx)
.L032good_key:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
ret
.align 4
.L029bad_keybits:
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
ret
.size aes_hw_set_encrypt_key_alt,.-.L_aes_hw_set_encrypt_key_alt_begin
.align 64
.Lkey_const:
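// pshufb masks for the alternative key schedule, followed by the
// round-constant seeds 1 and 27 (0x1b), which the code doubles in place.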
.long 202313229,202313229,202313229,202313229
.long 67569157,67569157,67569157,67569157
.long 1,1,1,1
.long 27,27,27,27
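// The .byte data below spells the credit string "AES for Intel AES-NI,
// CRYPTOGAMS by <appro@openssl.org>".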
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
.byte 115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
| mktmansour/MKT-KSA-Geolocation-Security | 47,706 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha512-x86_64-macosx.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _sha512_block_data_order_nohw
.private_extern _sha512_block_data_order_nohw
.p2align 4
_sha512_block_data_order_nohw:
_CET_ENDBR
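// sha512_block_data_order_nohw(uint64_t state[8], const uint8_t *in,
// size_t num_blocks): scalar SHA-512 compression. The shlq/leaq pair
// below turns the block count into an end pointer at in + num_blocks*128.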
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $128+32,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
L$prologue:
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp L$loop
.p2align 4
L$loop:
movq %rbx,%rdi
leaq K512(%rip),%rbp
xorq %rcx,%rdi
movq 0(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 8(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 16(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 24(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 32(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 40(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 48(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 56(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
addq %r14,%rax
movq 64(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 72(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 80(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 88(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 96(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 104(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 112(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 120(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
jmp L$rounds_16_xx
.p2align 4
L$rounds_16_xx:
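// Rounds 16..79: each step updates the 16-entry message schedule in the
// stack ring buffer, W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) +
// W[t-16], then performs one compression round. The loop stops when
// 7(%rbp) reaches the zero byte that follows the K512 constants.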
movq 8(%rsp),%r13
movq 112(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 72(%rsp),%r12
addq 0(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 16(%rsp),%r13
movq 120(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 80(%rsp),%r12
addq 8(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 24(%rsp),%r13
movq 0(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 88(%rsp),%r12
addq 16(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 32(%rsp),%r13
movq 8(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 96(%rsp),%r12
addq 24(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 40(%rsp),%r13
movq 16(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 104(%rsp),%r12
addq 32(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 48(%rsp),%r13
movq 24(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 112(%rsp),%r12
addq 40(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 56(%rsp),%r13
movq 32(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 120(%rsp),%r12
addq 48(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 64(%rsp),%r13
movq 40(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 0(%rsp),%r12
addq 56(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
movq 72(%rsp),%r13
movq 48(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 8(%rsp),%r12
addq 64(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 80(%rsp),%r13
movq 56(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 16(%rsp),%r12
addq 72(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 88(%rsp),%r13
movq 64(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 24(%rsp),%r12
addq 80(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 96(%rsp),%r13
movq 72(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 32(%rsp),%r12
addq 88(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 104(%rsp),%r13
movq 80(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 40(%rsp),%r12
addq 96(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 112(%rsp),%r13
movq 88(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 48(%rsp),%r12
addq 104(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 120(%rsp),%r13
movq 96(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 56(%rsp),%r12
addq 112(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 0(%rsp),%r13
movq 104(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 64(%rsp),%r12
addq 120(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
cmpb $0,7(%rbp)
jnz L$rounds_16_xx
movq 128+0(%rsp),%rdi
addq %r14,%rax
leaq 128(%rsi),%rsi
addq 0(%rdi),%rax
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb L$loop
movq 152(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue:
ret
.section __DATA,__const
.p2align 6
K512:
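// The 80 SHA-512 round constants, each written twice so vector code can
// load them broadcast across both 64-bit lanes; the final pair is the
// big-endian shuffle mask, whose zero byte also ends the round loops.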
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
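// The .byte string above spells "SHA512 block transform for x86_64,
// CRYPTOGAMS by <appro@openssl.org>".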
.text
.globl _sha512_block_data_order_avx
.private_extern _sha512_block_data_order_avx
.p2align 6
_sha512_block_data_order_avx:
_CET_ENDBR
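// AVX variant, same arguments as the nohw version: the message schedule
// lives in %xmm0-%xmm7 and is advanced with 128-bit vector ops while
// the compression rounds themselves stay scalar.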
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $160,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
L$prologue_avx:
vzeroupper
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp L$loop_avx
.p2align 4
L$loop_avx:
vmovdqa K512+1280(%rip),%xmm11
vmovdqu 0(%rsi),%xmm0
leaq K512+128(%rip),%rbp
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vpshufb %xmm11,%xmm0,%xmm0
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm11,%xmm1,%xmm1
vmovdqu 64(%rsi),%xmm4
vpshufb %xmm11,%xmm2,%xmm2
vmovdqu 80(%rsi),%xmm5
vpshufb %xmm11,%xmm3,%xmm3
vmovdqu 96(%rsi),%xmm6
vpshufb %xmm11,%xmm4,%xmm4
vmovdqu 112(%rsi),%xmm7
vpshufb %xmm11,%xmm5,%xmm5
vpaddq -128(%rbp),%xmm0,%xmm8
vpshufb %xmm11,%xmm6,%xmm6
vpaddq -96(%rbp),%xmm1,%xmm9
vpshufb %xmm11,%xmm7,%xmm7
vpaddq -64(%rbp),%xmm2,%xmm10
vpaddq -32(%rbp),%xmm3,%xmm11
vmovdqa %xmm8,0(%rsp)
vpaddq 0(%rbp),%xmm4,%xmm8
vmovdqa %xmm9,16(%rsp)
vpaddq 32(%rbp),%xmm5,%xmm9
vmovdqa %xmm10,32(%rsp)
vpaddq 64(%rbp),%xmm6,%xmm10
vmovdqa %xmm11,48(%rsp)
vpaddq 96(%rbp),%xmm7,%xmm11
vmovdqa %xmm8,64(%rsp)
movq %rax,%r14
vmovdqa %xmm9,80(%rsp)
movq %rbx,%rdi
vmovdqa %xmm10,96(%rsp)
xorq %rcx,%rdi
vmovdqa %xmm11,112(%rsp)
movq %r8,%r13
jmp L$avx_00_47
.p2align 4
L$avx_00_47:
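// Each pass performs 16 scheduled rounds; %rbp advances 256 bytes
// through K512 per pass, and after four passes (rounds 0-63) the zero
// byte at 135(%rbp), inside the trailing shuffle mask, exits the loop.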
addq $256,%rbp
vpalignr $8,%xmm0,%xmm1,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm4,%xmm5,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm0,%xmm0
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 0(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm7,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm7,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm0,%xmm0
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm7,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 8(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm0,%xmm0
xorq %r11,%r14
addq %r13,%r10
vpaddq -128(%rbp),%xmm0,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,0(%rsp)
vpalignr $8,%xmm1,%xmm2,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm5,%xmm6,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm1,%xmm1
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 16(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm0,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm0,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm1,%xmm1
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm0,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 24(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm1,%xmm1
xorq %r9,%r14
addq %r13,%r8
vpaddq -96(%rbp),%xmm1,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,16(%rsp)
vpalignr $8,%xmm2,%xmm3,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm6,%xmm7,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm2,%xmm2
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 32(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm1,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm1,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm2,%xmm2
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm1,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 40(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm2,%xmm2
xorq %rdx,%r14
addq %r13,%rcx
vpaddq -64(%rbp),%xmm2,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,32(%rsp)
vpalignr $8,%xmm3,%xmm4,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm7,%xmm0,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm3,%xmm3
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 48(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm2,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm2,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm3,%xmm3
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm2,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 56(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm3,%xmm3
xorq %rbx,%r14
addq %r13,%rax
vpaddq -32(%rbp),%xmm3,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,48(%rsp)
vpalignr $8,%xmm4,%xmm5,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm0,%xmm1,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm4,%xmm4
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 64(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm3,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm3,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm4,%xmm4
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm3,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 72(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm4,%xmm4
xorq %r11,%r14
addq %r13,%r10
vpaddq 0(%rbp),%xmm4,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,64(%rsp)
vpalignr $8,%xmm5,%xmm6,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm1,%xmm2,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm5,%xmm5
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 80(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm4,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm4,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm5,%xmm5
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm4,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 88(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm5,%xmm5
xorq %r9,%r14
addq %r13,%r8
vpaddq 32(%rbp),%xmm5,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,80(%rsp)
vpalignr $8,%xmm6,%xmm7,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm2,%xmm3,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm6,%xmm6
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 96(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm5,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm5,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm6,%xmm6
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm5,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 104(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm6,%xmm6
xorq %rdx,%r14
addq %r13,%rcx
vpaddq 64(%rbp),%xmm6,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,96(%rsp)
vpalignr $8,%xmm7,%xmm0,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm3,%xmm4,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm7,%xmm7
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 112(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm6,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm6,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm7,%xmm7
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm6,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 120(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm7,%xmm7
xorq %rbx,%r14
addq %r13,%rax
vpaddq 96(%rbp),%xmm7,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,112(%rsp)
cmpb $0,135(%rbp)
jne L$avx_00_47
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 0(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 8(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 16(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 24(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 32(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 40(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 48(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 56(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 64(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 72(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 80(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 88(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 96(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 104(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 112(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 120(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
movq 128+0(%rsp),%rdi
movq %r14,%rax
addq 0(%rdi),%rax
leaq 128(%rsi),%rsi
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb L$loop_avx
movq 152(%rsp),%rsi
vzeroupper
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_avx:
ret
#endif
| mktmansour/MKT-KSA-Geolocation-Security | 20,463 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/x86_64-mont-elf.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,@function
.align 16
bn_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
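// bn_mul_mont_nohw(rp=%rdi, ap=%rsi, bp=%rdx, np=%rcx, n0=%r8 (pointer
// to the Montgomery constant), num=%r9): word-serial Montgomery
// multiplication with a final constant-time reduction. Returns 1.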
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
negq %r9
movq %rsp,%r11
leaq -16(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul_page_walk
jmp .Lmul_page_walk_done
.align 16
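// Extend the stack one page at a time, touching each page, so the
// guard page is never skipped for large num.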
.Lmul_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul_page_walk
.Lmul_page_walk_done:
movq %rax,8(%rsp,%r9,8)
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
.Lmul_body:
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .L1st_enter
.align 16
.L1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.L1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne .L1st
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp .Louter
.align 16
.Louter:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp .Linner_enter
.align 16
.Linner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
.Linner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne .Linner
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb .Louter
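// Final reduction: subtract the modulus once, then use the borrow as a
// mask to select, in constant time, the subtracted or unsubtracted
// result, overwriting the scratch words as they are consumed.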
xorq %r14,%r14
movq (%rsp),%rax
movq %r9,%r15
.align 16
.Lsub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsp,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz .Lsub
sbbq $0,%rax
movq $-1,%rbx
xorq %rax,%rbx
xorq %r14,%r14
movq %r9,%r15
.Lcopy:
movq (%rdi,%r14,8),%rcx
movq (%rsp,%r14,8),%rdx
andq %rbx,%rcx
andq %rax,%rdx
movq %r9,(%rsp,%r14,8)
orq %rcx,%rdx
movq %rdx,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz .Lcopy
movq 8(%rsp,%r9,8),%rsi
.cfi_def_cfa %rsi,8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmul_epilogue:
ret
.cfi_endproc
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
.globl bn_mul4x_mont
.hidden bn_mul4x_mont
.type bn_mul4x_mont,@function
.align 16
bn_mul4x_mont:
.cfi_startproc
_CET_ENDBR
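// bn_mul4x_mont: four-way unrolled variant with the same calling
// convention as bn_mul_mont_nohw; callers are expected to use it only
// when num is a multiple of four.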
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
negq %r9
movq %rsp,%r11
leaq -32(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul4x_page_walk
jmp .Lmul4x_page_walk_done
.Lmul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja .Lmul4x_page_walk
.Lmul4x_page_walk_done:
movq %rax,8(%rsp,%r9,8)
.cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08
.Lmul4x_body:
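// Four-way unrolled variant of the word-serial loop above; same
// algorithm, but each pass of .L1st4x/.Linner4x retires four limbs to
// keep the two mul chains (a[]*b[i] and m*n[]) busy.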
movq %rdi,16(%rsp,%r9,8)
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .L1st4x
.align 16
.L1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .L1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
leaq 1(%r14),%r14
.align 4
.Louter4x:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq (%rsp),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%rsp),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp .Linner4x
.align 16
.Linner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq 8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb .Linner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%rsp,%r9,8),%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
jb .Louter4x
movq 16(%rsp,%r9,8),%rdi
leaq -4(%r9),%r15
movq 0(%rsp),%rax
movq 8(%rsp),%rdx
shrq $2,%r15
leaq (%rsp),%rsi
xorq %r14,%r14
subq 0(%rcx),%rax
movq 16(%rsi),%rbx
movq 24(%rsi),%rbp
sbbq 8(%rcx),%rdx
.Lsub4x:
movq %rax,0(%rdi,%r14,8)
movq %rdx,8(%rdi,%r14,8)
sbbq 16(%rcx,%r14,8),%rbx
movq 32(%rsi,%r14,8),%rax
movq 40(%rsi,%r14,8),%rdx
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
movq %rbp,24(%rdi,%r14,8)
sbbq 32(%rcx,%r14,8),%rax
movq 48(%rsi,%r14,8),%rbx
movq 56(%rsi,%r14,8),%rbp
sbbq 40(%rcx,%r14,8),%rdx
leaq 4(%r14),%r14
decq %r15
jnz .Lsub4x
movq %rax,0(%rdi,%r14,8)
movq 32(%rsi,%r14,8),%rax
sbbq 16(%rcx,%r14,8),%rbx
movq %rdx,8(%rdi,%r14,8)
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
sbbq $0,%rax
movq %rbp,24(%rdi,%r14,8)
pxor %xmm0,%xmm0
.byte 102,72,15,110,224
pcmpeqd %xmm5,%xmm5
pshufd $0,%xmm4,%xmm4
movq %r9,%r15
pxor %xmm4,%xmm5
shrq $2,%r15
xorl %eax,%eax
jmp .Lcopy4x
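// Constant-time conditional copy, 16 bytes per step: the .byte at the
// mask setup above encodes movq %rax,%xmm4, so %xmm4 is the broadcast
// borrow mask (keep the unreduced value) and %xmm5 its complement (keep
// the subtracted one); the stack temporary is wiped with %xmm0 = 0 on
// the way.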
.align 16
.Lcopy4x:
movdqa (%rsp,%rax,1),%xmm1
movdqu (%rdi,%rax,1),%xmm2
pand %xmm4,%xmm1
pand %xmm5,%xmm2
movdqa 16(%rsp,%rax,1),%xmm3
movdqa %xmm0,(%rsp,%rax,1)
por %xmm2,%xmm1
movdqu 16(%rdi,%rax,1),%xmm2
movdqu %xmm1,(%rdi,%rax,1)
pand %xmm4,%xmm3
pand %xmm5,%xmm2
movdqa %xmm0,16(%rsp,%rax,1)
por %xmm2,%xmm3
movdqu %xmm3,16(%rdi,%rax,1)
leaq 32(%rax),%rax
decq %r15
jnz .Lcopy4x
movq 8(%rsp,%r9,8),%rsi
.cfi_def_cfa %rsi, 8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmul4x_epilogue:
ret
.cfi_endproc
.size bn_mul4x_mont,.-bn_mul4x_mont
.extern bn_sqrx8x_internal
.hidden bn_sqrx8x_internal
.extern bn_sqr8x_internal
.hidden bn_sqr8x_internal
.globl bn_sqr8x_mont
.hidden bn_sqr8x_mont
.type bn_sqr8x_mont,@function
.align 32
bn_sqr8x_mont:
.cfi_startproc
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lsqr8x_prologue:
movl %r9d,%r10d
shll $3,%r9d
shlq $3+2,%r10
negq %r9
leaq -64(%rsp,%r9,2),%r11
movq %rsp,%rbp
movq (%r8),%r8
subq %rsi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lsqr8x_sp_alt
subq %r11,%rbp
leaq -64(%rbp,%r9,2),%rbp
jmp .Lsqr8x_sp_done
.align 32
.Lsqr8x_sp_alt:
leaq 4096-64(,%r9,2),%r10
leaq -64(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
.Lsqr8x_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lsqr8x_page_walk
jmp .Lsqr8x_page_walk_done
.align 16
.Lsqr8x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lsqr8x_page_walk
.Lsqr8x_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
.Lsqr8x_body:
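// %rdx carries a flag here, not a pointer: nonzero (presumably "MULX/ADX
// available") routes to bn_sqrx8x_internal, zero to the plain
// bn_sqr8x_internal.  The .byte movq lines stash %rcx/%rdi/%r10 in
// %xmm1-%xmm3 so they survive the internal call.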
.byte 102,72,15,110,209
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,73,15,110,218
testq %rdx,%rdx
jz .Lsqr8x_nox
call bn_sqrx8x_internal
leaq (%r8,%rcx,1),%rbx
movq %rcx,%r9
movq %rcx,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp .Lsqr8x_sub
.align 32
.Lsqr8x_nox:
call bn_sqr8x_internal
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp .Lsqr8x_sub
.align 32
.Lsqr8x_sub:
movq 0(%rbx),%r12
movq 8(%rbx),%r13
movq 16(%rbx),%r14
movq 24(%rbx),%r15
leaq 32(%rbx),%rbx
sbbq 0(%rbp),%r12
sbbq 8(%rbp),%r13
sbbq 16(%rbp),%r14
sbbq 24(%rbp),%r15
leaq 32(%rbp),%rbp
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz .Lsqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
jmp .Lsqr8x_cond_copy
.align 32
.Lsqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz .Lsqr8x_cond_copy
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lsqr8x_epilogue:
ret
.cfi_endproc
.size bn_sqr8x_mont,.-bn_sqr8x_mont
.globl bn_mulx4x_mont
.hidden bn_mulx4x_mont
.type bn_mulx4x_mont,@function
.align 32
bn_mulx4x_mont:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lmulx4x_prologue:
shll $3,%r9d
xorq %r10,%r10
subq %r9,%r10
movq (%r8),%r8
leaq -72(%rsp,%r10,1),%rbp
andq $-128,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
jmp .Lmulx4x_page_walk_done
.align 16
.Lmulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
.Lmulx4x_page_walk_done:
leaq (%rdx,%r9,1),%r10
movq %r9,0(%rsp)
shrq $5,%r9
movq %r10,16(%rsp)
subq $1,%r9
movq %r8,24(%rsp)
movq %rdi,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
movq %r9,48(%rsp)
jmp .Lmulx4x_body
.align 32
.Lmulx4x_body:
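// BMI2/ADX code path: MULX does not touch the flags, so two independent
// carry chains run interleaved, ADCX on CF and ADOX on OF, covering the
// a[]*b[i] products and the m*n[] reduction in the same loop.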
leaq 8(%rdx),%rdi
movq (%rdx),%rdx
leaq 64+32(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r14
addq %rax,%r11
movq %rdi,8(%rsp)
mulxq 16(%rsi),%r12,%r13
adcq %r14,%r12
adcq $0,%r13
movq %r8,%rdi
imulq 24(%rsp),%r8
xorq %rbp,%rbp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%rdi
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00
movq 48(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp .Lmulx4x_1st
.align 32
.Lmulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_1st
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
addq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
jmp .Lmulx4x_outer
.align 32
.Lmulx4x_outer:
movq (%rdi),%rdx
leaq 8(%rdi),%rdi
subq %rax,%rsi
movq %r15,(%rbx)
leaq 64+32(%rsp),%rbx
subq %rax,%rcx
mulxq 0(%rsi),%r8,%r11
xorl %ebp,%ebp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
adoxq -16(%rbx),%r12
adcxq %rbp,%r13
adoxq %rbp,%r13
movq %rdi,8(%rsp)
movq %r8,%r15
imulq 24(%rsp),%r8
xorl %ebp,%ebp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
adcxq %rax,%r13
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
adoxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
leaq 32(%rcx),%rcx
adcxq %rax,%r12
adoxq %rbp,%r15
movq 48(%rsp),%rdi
movq %r12,-16(%rbx)
jmp .Lmulx4x_inner
.align 32
.Lmulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-32(%rbx)
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_inner
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
subq 0(%rbx),%rbp
adcq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
cmpq 16(%rsp),%rdi
jne .Lmulx4x_outer
leaq 64(%rsp),%rbx
subq %rax,%rcx
negq %r15
movq %rax,%rdx
shrq $3+2,%rax
movq 32(%rsp),%rdi
jmp .Lmulx4x_sub
.align 32
.Lmulx4x_sub:
movq 0(%rbx),%r11
movq 8(%rbx),%r12
movq 16(%rbx),%r13
movq 24(%rbx),%r14
leaq 32(%rbx),%rbx
sbbq 0(%rcx),%r11
sbbq 8(%rcx),%r12
sbbq 16(%rcx),%r13
sbbq 24(%rcx),%r14
leaq 32(%rcx),%rcx
movq %r11,0(%rdi)
movq %r12,8(%rdi)
movq %r13,16(%rdi)
movq %r14,24(%rdi)
leaq 32(%rdi),%rdi
decq %rax
jnz .Lmulx4x_sub
sbbq $0,%r15
leaq 64(%rsp),%rbx
subq %rdx,%rdi
.byte 102,73,15,110,207
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
jmp .Lmulx4x_cond_copy
.align 32
.Lmulx4x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
subq $32,%rdx
jnz .Lmulx4x_cond_copy
movq %rdx,(%rbx)
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmulx4x_epilogue:
ret
.cfi_endproc
.size bn_mulx4x_mont,.-bn_mulx4x_mont
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 // "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.align 16
#endif
|
mktmansour/MKT-KSA-Geolocation-Security | 21,811 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/armv4-mont-linux32.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,%function
.align 5
bn_mul_mont_nohw:
ldr ip,[sp,#4] @ load num
stmdb sp!,{r0,r2} @ sp points at argument block
cmp ip,#2
mov r0,ip @ load num
#ifdef __thumb2__
ittt lt
#endif
movlt r0,#0
addlt sp,sp,#2*4
blt .Labrt
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
mov r0,r0,lsl#2 @ rescale r0 for byte count
sub sp,sp,r0 @ alloca(4*num)
sub sp,sp,#4 @ +extra dword
sub r0,r0,#4 @ "num=num-1"
add r4,r2,r0 @ &bp[num-1]
add r0,sp,r0 @ r0 to point at &tp[num-1]
ldr r8,[r0,#14*4] @ &n0
ldr r2,[r2] @ bp[0]
ldr r5,[r1],#4 @ ap[0],ap++
ldr r6,[r3],#4 @ np[0],np++
ldr r8,[r8] @ *n0
str r4,[r0,#15*4] @ save &bp[num]
umull r10,r11,r5,r2 @ ap[0]*bp[0]
str r8,[r0,#14*4] @ save n0 value
mul r8,r10,r8 @ "tp[0]"*n0
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
mov r4,sp
.L1st:
ldr r5,[r1],#4 @ ap[j],ap++
mov r10,r11
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[0]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne .L1st
adds r12,r12,r11
ldr r4,[r0,#13*4] @ restore bp
mov r14,#0
ldr r8,[r0,#14*4] @ restore n0
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
mov r7,sp
str r14,[r0,#4] @ tp[num]=
.Louter:
sub r7,r0,r7 @ "original" r0-1 value
sub r1,r1,r7 @ "rewind" ap to &ap[1]
ldr r2,[r4,#4]! @ *(++bp)
sub r3,r3,r7 @ "rewind" np to &np[1]
ldr r5,[r1,#-4] @ ap[0]
ldr r10,[sp] @ tp[0]
ldr r6,[r3,#-4] @ np[0]
ldr r7,[sp,#4] @ tp[1]
mov r11,#0
umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
str r4,[r0,#13*4] @ save bp
mul r8,r10,r8
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
mov r4,sp
.Linner:
ldr r5,[r1],#4 @ ap[j],ap++
adds r10,r11,r7 @ +=tp[j]
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[i]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adc r11,r11,#0
ldr r7,[r4,#8] @ tp[j+1]
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne .Linner
adds r12,r12,r11
mov r14,#0
ldr r4,[r0,#13*4] @ restore bp
adc r14,r14,#0
ldr r8,[r0,#14*4] @ restore n0
adds r12,r12,r7
ldr r7,[r0,#15*4] @ restore &bp[num]
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
str r14,[r0,#4] @ tp[num]=
cmp r4,r7
#ifdef __thumb2__
itt ne
#endif
movne r7,sp
bne .Louter
ldr r2,[r0,#12*4] @ pull rp
mov r5,sp
add r0,r0,#4 @ r0 to point at &tp[num]
sub r5,r0,r5 @ "original" num value
mov r4,sp @ "rewind" r4
mov r1,r4 @ "borrow" r1
sub r3,r3,r5 @ "rewind" r3 to &np[0]
subs r7,r7,r7 @ "clear" carry flag
.Lsub: ldr r7,[r4],#4
ldr r6,[r3],#4
sbcs r7,r7,r6 @ tp[j]-np[j]
str r7,[r2],#4 @ rp[j]=
teq r4,r0 @ preserve carry
bne .Lsub
sbcs r14,r14,#0 @ upmost carry
mov r4,sp @ "rewind" r4
sub r2,r2,r5 @ "rewind" r2
.Lcopy: ldr r7,[r4] @ conditional copy
ldr r5,[r2]
str sp,[r4],#4 @ zap tp
#ifdef __thumb2__
it cc
#endif
movcc r5,r7
str r5,[r2],#4
teq r4,r0 @ preserve carry
bne .Lcopy
mov sp,r0
add sp,sp,#4 @ skip over tp[num+1]
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
add sp,sp,#2*4 @ skip over {r0,r2}
mov r0,#1
.Labrt:
#if __ARM_ARCH>=5
bx lr @ bx lr
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl bn_mul8x_mont_neon
.hidden bn_mul8x_mont_neon
.type bn_mul8x_mont_neon,%function
.align 5
bn_mul8x_mont_neon:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load rest of parameter block
mov ip,sp
cmp r5,#8
bhi .LNEON_8n
@ special case for r5==8, everything is in register bank...
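	@ Each 32-bit multiplier word is first split into 16-bit digits
	@ (vzip.16 against a zeroed d-register), so the vmlal.u32 products
	@ are at most 48 bits and can be accumulated in 64-bit lanes; carries
	@ are pushed out 16 bits at a time with vshr.u64 #16.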
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
sub r7,sp,r5,lsl#4
vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-(
and r7,r7,#-64
vld1.32 {d30[0]}, [r4,:32]
mov sp,r7 @ alloca
vzip.16 d28,d8
vmull.u32 q6,d28,d0[0]
vmull.u32 q7,d28,d0[1]
vmull.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmull.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
vmul.u32 d29,d29,d30
vmull.u32 q10,d28,d2[0]
vld1.32 {d4,d5,d6,d7}, [r3]!
vmull.u32 q11,d28,d2[1]
vmull.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmull.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
sub r9,r5,#1
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
b .LNEON_outer8
.align 4
.LNEON_outer8:
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
vzip.16 d28,d8
vadd.u64 d12,d12,d10
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
subs r9,r9,#1
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
bne .LNEON_outer8
vadd.u64 d12,d12,d10
mov r7,sp
vshr.u64 d10,d12,#16
mov r8,r5
vadd.u64 d13,d13,d10
add r6,sp,#96
vshr.u64 d10,d13,#16
vzip.16 d12,d13
b .LNEON_tail_entry
.align 4
.LNEON_8n:
veor q6,q6,q6
sub r7,sp,#128
veor q7,q7,q7
sub r7,r7,r5,lsl#4
veor q8,q8,q8
and r7,r7,#-64
veor q9,q9,q9
mov sp,r7 @ alloca
veor q10,q10,q10
add r7,r7,#256
veor q11,q11,q11
sub r8,r5,#8
veor q12,q12,q12
veor q13,q13,q13
.LNEON_8n_init:
vst1.64 {q6,q7},[r7,:256]!
subs r8,r8,#8
vst1.64 {q8,q9},[r7,:256]!
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12,q13},[r7,:256]!
bne .LNEON_8n_init
add r6,sp,#256
vld1.32 {d0,d1,d2,d3},[r1]!
add r10,sp,#8
vld1.32 {d30[0]},[r4,:32]
mov r9,r5
b .LNEON_8n_outer
.align 4
.LNEON_8n_outer:
vld1.32 {d28[0]},[r2,:32]! @ *b++
veor d8,d8,d8
vzip.16 d28,d8
add r7,sp,#128
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
vmlal.u32 q10,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q11,d28,d2[1]
vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q6,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q7,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q8,d29,d5[0]
vshr.u64 d12,d12,#16
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vadd.u64 d12,d12,d13
vmlal.u32 q11,d29,d6[1]
vshr.u64 d12,d12,#16
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vadd.u64 d14,d14,d12
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0]
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]!
vmlal.u32 q8,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q9,d28,d1[0]
vshl.i64 d29,d15,#16
vmlal.u32 q10,d28,d1[1]
vadd.u64 d29,d29,d14
vmlal.u32 q11,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q12,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1]
vmlal.u32 q13,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q7,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q8,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q9,d29,d5[0]
vshr.u64 d14,d14,#16
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vadd.u64 d14,d14,d15
vmlal.u32 q12,d29,d6[1]
vshr.u64 d14,d14,#16
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vadd.u64 d16,d16,d14
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1]
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]!
vmlal.u32 q9,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q10,d28,d1[0]
vshl.i64 d29,d17,#16
vmlal.u32 q11,d28,d1[1]
vadd.u64 d29,d29,d16
vmlal.u32 q12,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q13,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2]
vmlal.u32 q6,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q8,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q9,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q10,d29,d5[0]
vshr.u64 d16,d16,#16
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vadd.u64 d16,d16,d17
vmlal.u32 q13,d29,d6[1]
vshr.u64 d16,d16,#16
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vadd.u64 d18,d18,d16
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2]
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]!
vmlal.u32 q10,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q11,d28,d1[0]
vshl.i64 d29,d19,#16
vmlal.u32 q12,d28,d1[1]
vadd.u64 d29,d29,d18
vmlal.u32 q13,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q6,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3]
vmlal.u32 q7,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q9,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q10,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q11,d29,d5[0]
vshr.u64 d18,d18,#16
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vadd.u64 d18,d18,d19
vmlal.u32 q6,d29,d6[1]
vshr.u64 d18,d18,#16
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vadd.u64 d20,d20,d18
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3]
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]!
vmlal.u32 q11,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q12,d28,d1[0]
vshl.i64 d29,d21,#16
vmlal.u32 q13,d28,d1[1]
vadd.u64 d29,d29,d20
vmlal.u32 q6,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q7,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4]
vmlal.u32 q8,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q10,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q11,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q12,d29,d5[0]
vshr.u64 d20,d20,#16
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vadd.u64 d20,d20,d21
vmlal.u32 q7,d29,d6[1]
vshr.u64 d20,d20,#16
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vadd.u64 d22,d22,d20
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4]
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]!
vmlal.u32 q12,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q13,d28,d1[0]
vshl.i64 d29,d23,#16
vmlal.u32 q6,d28,d1[1]
vadd.u64 d29,d29,d22
vmlal.u32 q7,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q8,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5]
vmlal.u32 q9,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q11,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q12,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q13,d29,d5[0]
vshr.u64 d22,d22,#16
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vadd.u64 d22,d22,d23
vmlal.u32 q8,d29,d6[1]
vshr.u64 d22,d22,#16
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vadd.u64 d24,d24,d22
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5]
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]!
vmlal.u32 q13,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q6,d28,d1[0]
vshl.i64 d29,d25,#16
vmlal.u32 q7,d28,d1[1]
vadd.u64 d29,d29,d24
vmlal.u32 q8,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q9,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6]
vmlal.u32 q10,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q12,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q13,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q6,d29,d5[0]
vshr.u64 d24,d24,#16
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vadd.u64 d24,d24,d25
vmlal.u32 q9,d29,d6[1]
vshr.u64 d24,d24,#16
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vadd.u64 d26,d26,d24
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6]
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]!
vmlal.u32 q6,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q7,d28,d1[0]
vshl.i64 d29,d27,#16
vmlal.u32 q8,d28,d1[1]
vadd.u64 d29,d29,d26
vmlal.u32 q9,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7]
vmlal.u32 q11,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q12,d28,d3[1]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q13,d29,d4[0]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q6,d29,d4[1]
vmlal.u32 q7,d29,d5[0]
vshr.u64 d26,d26,#16
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vadd.u64 d26,d26,d27
vmlal.u32 q10,d29,d6[1]
vshr.u64 d26,d26,#16
vmlal.u32 q11,d29,d7[0]
vmlal.u32 q12,d29,d7[1]
vadd.u64 d12,d12,d26
vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7]
add r10,sp,#8 @ rewind
sub r8,r5,#8
b .LNEON_8n_inner
.align 4
.LNEON_8n_inner:
subs r8,r8,#8
vmlal.u32 q6,d28,d0[0]
vld1.64 {q13},[r6,:128]
vmlal.u32 q7,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0]
vmlal.u32 q8,d28,d1[0]
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q9,d28,d1[1]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmlal.u32 q11,d29,d6[1]
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vst1.64 {q6},[r7,:128]!
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]
vmlal.u32 q8,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1]
vmlal.u32 q9,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d1[1]
vmlal.u32 q11,d28,d2[0]
vmlal.u32 q12,d28,d2[1]
vmlal.u32 q13,d28,d3[0]
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2]
vmlal.u32 q7,d29,d4[0]
vmlal.u32 q8,d29,d4[1]
vmlal.u32 q9,d29,d5[0]
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vmlal.u32 q12,d29,d6[1]
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vst1.64 {q7},[r7,:128]!
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]
vmlal.u32 q9,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2]
vmlal.u32 q10,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q11,d28,d1[1]
vmlal.u32 q12,d28,d2[0]
vmlal.u32 q13,d28,d2[1]
vmlal.u32 q6,d28,d3[0]
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3]
vmlal.u32 q8,d29,d4[0]
vmlal.u32 q9,d29,d4[1]
vmlal.u32 q10,d29,d5[0]
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vmlal.u32 q13,d29,d6[1]
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vst1.64 {q8},[r7,:128]!
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]
vmlal.u32 q10,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3]
vmlal.u32 q11,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q12,d28,d1[1]
vmlal.u32 q13,d28,d2[0]
vmlal.u32 q6,d28,d2[1]
vmlal.u32 q7,d28,d3[0]
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4]
vmlal.u32 q9,d29,d4[0]
vmlal.u32 q10,d29,d4[1]
vmlal.u32 q11,d29,d5[0]
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vmlal.u32 q6,d29,d6[1]
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vst1.64 {q9},[r7,:128]!
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]
vmlal.u32 q11,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4]
vmlal.u32 q12,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q13,d28,d1[1]
vmlal.u32 q6,d28,d2[0]
vmlal.u32 q7,d28,d2[1]
vmlal.u32 q8,d28,d3[0]
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5]
vmlal.u32 q10,d29,d4[0]
vmlal.u32 q11,d29,d4[1]
vmlal.u32 q12,d29,d5[0]
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vmlal.u32 q7,d29,d6[1]
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vst1.64 {q10},[r7,:128]!
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]
vmlal.u32 q12,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5]
vmlal.u32 q13,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q6,d28,d1[1]
vmlal.u32 q7,d28,d2[0]
vmlal.u32 q8,d28,d2[1]
vmlal.u32 q9,d28,d3[0]
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6]
vmlal.u32 q11,d29,d4[0]
vmlal.u32 q12,d29,d4[1]
vmlal.u32 q13,d29,d5[0]
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vmlal.u32 q8,d29,d6[1]
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vst1.64 {q11},[r7,:128]!
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]
vmlal.u32 q13,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6]
vmlal.u32 q6,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q7,d28,d1[1]
vmlal.u32 q8,d28,d2[0]
vmlal.u32 q9,d28,d2[1]
vmlal.u32 q10,d28,d3[0]
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7]
vmlal.u32 q12,d29,d4[0]
vmlal.u32 q13,d29,d4[1]
vmlal.u32 q6,d29,d5[0]
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vmlal.u32 q9,d29,d6[1]
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vst1.64 {q12},[r7,:128]!
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]
vmlal.u32 q6,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7]
vmlal.u32 q7,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q8,d28,d1[1]
vmlal.u32 q9,d28,d2[0]
vmlal.u32 q10,d28,d2[1]
vmlal.u32 q11,d28,d3[0]
vmlal.u32 q12,d28,d3[1]
it eq
subeq r1,r1,r5,lsl#2 @ rewind
vmlal.u32 q13,d29,d4[0]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q6,d29,d4[1]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q7,d29,d5[0]
add r10,sp,#8 @ rewind
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vmlal.u32 q10,d29,d6[1]
vmlal.u32 q11,d29,d7[0]
vst1.64 {q13},[r7,:128]!
vmlal.u32 q12,d29,d7[1]
bne .LNEON_8n_inner
add r6,sp,#128
vst1.64 {q6,q7},[r7,:256]!
veor q2,q2,q2 @ d4-d5
vst1.64 {q8,q9},[r7,:256]!
veor q3,q3,q3 @ d6-d7
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12},[r7,:128]
subs r9,r9,#8
vld1.64 {q6,q7},[r6,:256]!
vld1.64 {q8,q9},[r6,:256]!
vld1.64 {q10,q11},[r6,:256]!
vld1.64 {q12,q13},[r6,:256]!
itt ne
subne r3,r3,r5,lsl#2 @ rewind
bne .LNEON_8n_outer
add r7,sp,#128
vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame
vshr.u64 d10,d12,#16
vst1.64 {q2,q3},[sp,:256]!
vadd.u64 d13,d13,d10
vst1.64 {q2,q3}, [sp,:256]!
vshr.u64 d10,d13,#16
vst1.64 {q2,q3}, [sp,:256]!
vzip.16 d12,d13
mov r8,r5
b .LNEON_tail_entry
.align 4
.LNEON_tail:
vadd.u64 d12,d12,d10
vshr.u64 d10,d12,#16
vld1.64 {q8,q9}, [r6, :256]!
vadd.u64 d13,d13,d10
vld1.64 {q10,q11}, [r6, :256]!
vshr.u64 d10,d13,#16
vld1.64 {q12,q13}, [r6, :256]!
vzip.16 d12,d13
.LNEON_tail_entry:
vadd.u64 d14,d14,d10
vst1.32 {d12[0]}, [r7, :32]!
vshr.u64 d10,d14,#16
vadd.u64 d15,d15,d10
vshr.u64 d10,d15,#16
vzip.16 d14,d15
vadd.u64 d16,d16,d10
vst1.32 {d14[0]}, [r7, :32]!
vshr.u64 d10,d16,#16
vadd.u64 d17,d17,d10
vshr.u64 d10,d17,#16
vzip.16 d16,d17
vadd.u64 d18,d18,d10
vst1.32 {d16[0]}, [r7, :32]!
vshr.u64 d10,d18,#16
vadd.u64 d19,d19,d10
vshr.u64 d10,d19,#16
vzip.16 d18,d19
vadd.u64 d20,d20,d10
vst1.32 {d18[0]}, [r7, :32]!
vshr.u64 d10,d20,#16
vadd.u64 d21,d21,d10
vshr.u64 d10,d21,#16
vzip.16 d20,d21
vadd.u64 d22,d22,d10
vst1.32 {d20[0]}, [r7, :32]!
vshr.u64 d10,d22,#16
vadd.u64 d23,d23,d10
vshr.u64 d10,d23,#16
vzip.16 d22,d23
vadd.u64 d24,d24,d10
vst1.32 {d22[0]}, [r7, :32]!
vshr.u64 d10,d24,#16
vadd.u64 d25,d25,d10
vshr.u64 d10,d25,#16
vzip.16 d24,d25
vadd.u64 d26,d26,d10
vst1.32 {d24[0]}, [r7, :32]!
vshr.u64 d10,d26,#16
vadd.u64 d27,d27,d10
vshr.u64 d10,d27,#16
vzip.16 d26,d27
vld1.64 {q6,q7}, [r6, :256]!
subs r8,r8,#8
vst1.32 {d26[0]}, [r7, :32]!
bne .LNEON_tail
vst1.32 {d10[0]}, [r7, :32] @ top-most bit
sub r3,r3,r5,lsl#2 @ rewind r3
subs r1,sp,#0 @ clear carry flag
add r2,sp,r5,lsl#2
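	@ Final reduction: subtract the modulus from the accumulated result,
	@ then the copy-n-zap loop keeps the subtracted words when the final
	@ subtraction did not borrow, restores the unreduced ones otherwise
	@ (movcc), and wipes the scratch frame with q0/q1 zeros as it goes.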
.LNEON_sub:
ldmia r1!, {r4,r5,r6,r7}
ldmia r3!, {r8,r9,r10,r11}
sbcs r8, r4,r8
sbcs r9, r5,r9
sbcs r10,r6,r10
sbcs r11,r7,r11
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_sub
ldr r10, [r1] @ load top-most bit
mov r11,sp
veor q0,q0,q0
sub r11,r2,r11 @ this is num*4
veor q1,q1,q1
mov r1,sp
sub r0,r0,r11 @ rewind r0
mov r3,r2 @ second 3/4th of frame
sbcs r10,r10,#0 @ result is carry flag
.LNEON_copy_n_zap:
ldmia r1!, {r4,r5,r6,r7}
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r3,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
ldmia r1, {r4,r5,r6,r7}
stmia r0!, {r8,r9,r10,r11}
sub r1,r1,#16
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r1,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_copy_n_zap
mov sp,ip
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
bx lr @ bx lr
.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 @ "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
mktmansour/MKT-KSA-Geolocation-Security | 19,873 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesni-x86_64-macosx.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
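// The .byte runs below are hand-encoded AES-NI instructions kept as raw
// bytes for old assemblers: 102,15,56,220 (66 0f 38 dc) is aesenc and
// 102,15,56,221 is aesenclast.  Each _aesni_encryptN helper pushes N
// independent blocks through the rounds in lockstep to hide the
// instruction latency.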
.p2align 4
_aesni_encrypt2:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
L$enc_loop2:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop2
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.p2align 4
_aesni_encrypt3:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
L$enc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop3
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.p2align 4
_aesni_encrypt4:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 0x0f,0x1f,0x00
addq $16,%rax
L$enc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop4
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.p2align 4
_aesni_encrypt6:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp L$enc_loop6_enter
.p2align 4
L$enc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
L$enc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.p2align 4
_aesni_encrypt8:
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,209
pxor %xmm0,%xmm7
pxor %xmm0,%xmm8
.byte 102,15,56,220,217
pxor %xmm0,%xmm9
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp L$enc_loop8_inner
.p2align 4
L$enc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
L$enc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
L$enc_loop8_enter:
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop8
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
.byte 102,68,15,56,221,192
.byte 102,68,15,56,221,200
ret
.globl _aes_hw_ctr32_encrypt_blocks
.private_extern _aes_hw_ctr32_encrypt_blocks
.p2align 4
_aes_hw_ctr32_encrypt_blocks:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit(%rip)
#endif
cmpq $1,%rdx
jne L$ctr32_bulk
movups (%r8),%xmm2
movups (%rdi),%xmm3
movl 240(%rcx),%edx
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
L$oop_enc1_1:
.byte 102,15,56,220,209
decl %edx
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
jnz L$oop_enc1_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorps %xmm3,%xmm2
pxor %xmm3,%xmm3
movups %xmm2,(%rsi)
xorps %xmm2,%xmm2
jmp L$ctr32_epilogue
.p2align 4
L$ctr32_bulk:
leaq (%rsp),%r11
pushq %rbp
subq $128,%rsp
andq $-16,%rsp
movdqu (%r8),%xmm2
movdqu (%rcx),%xmm0
movl 12(%r8),%r8d
pxor %xmm0,%xmm2
movl 12(%rcx),%ebp
movdqa %xmm2,0(%rsp)
bswapl %r8d
movdqa %xmm2,%xmm3
movdqa %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm2,64(%rsp)
movdqa %xmm2,80(%rsp)
movdqa %xmm2,96(%rsp)
movq %rdx,%r10
movdqa %xmm2,112(%rsp)
leaq 1(%r8),%rax
leaq 2(%r8),%rdx
bswapl %eax
bswapl %edx
xorl %ebp,%eax
xorl %ebp,%edx
.byte 102,15,58,34,216,3
leaq 3(%r8),%rax
movdqa %xmm3,16(%rsp)
.byte 102,15,58,34,226,3
bswapl %eax
movq %r10,%rdx
leaq 4(%r8),%r10
movdqa %xmm4,32(%rsp)
xorl %ebp,%eax
bswapl %r10d
.byte 102,15,58,34,232,3
xorl %ebp,%r10d
movdqa %xmm5,48(%rsp)
leaq 5(%r8),%r9
movl %r10d,64+12(%rsp)
bswapl %r9d
leaq 6(%r8),%r10
movl 240(%rcx),%eax
xorl %ebp,%r9d
bswapl %r10d
movl %r9d,80+12(%rsp)
xorl %ebp,%r10d
leaq 7(%r8),%r9
movl %r10d,96+12(%rsp)
bswapl %r9d
xorl %ebp,%r9d
movl %r9d,112+12(%rsp)
movups 16(%rcx),%xmm1
movdqa 64(%rsp),%xmm6
movdqa 80(%rsp),%xmm7
cmpq $8,%rdx
jb L$ctr32_tail
leaq 128(%rcx),%rcx
subq $8,%rdx
jmp L$ctr32_loop8
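// Eight-block CTR pipeline.  The counter blocks at 0..112(%rsp) are kept
// pre-xored with round key 0, so each iteration only byte-swaps the next
// eight 32-bit counters and xors them with %ebp (the last dword of round
// key 0) while the aesenc rounds of the current batch are in flight.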
.p2align 5
L$ctr32_loop8:
addl $8,%r8d
movdqa 96(%rsp),%xmm8
.byte 102,15,56,220,209
movl %r8d,%r9d
movdqa 112(%rsp),%xmm9
.byte 102,15,56,220,217
bswapl %r9d
movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
xorl %ebp,%r9d
nop
.byte 102,15,56,220,233
movl %r9d,0+12(%rsp)
leaq 1(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 48-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,16+12(%rsp)
leaq 2(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 64-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,32+12(%rsp)
leaq 3(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 80-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,48+12(%rsp)
leaq 4(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 96-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,64+12(%rsp)
leaq 5(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 112-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,80+12(%rsp)
leaq 6(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 128-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,96+12(%rsp)
leaq 7(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 144-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
xorl %ebp,%r9d
movdqu 0(%rdi),%xmm10
.byte 102,15,56,220,232
movl %r9d,112+12(%rsp)
cmpl $11,%eax
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 160-128(%rcx),%xmm0
jb L$ctr32_enc_done
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 176-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 192-128(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 208-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 224-128(%rcx),%xmm0
jmp L$ctr32_enc_done
.p2align 4
L$ctr32_enc_done:
movdqu 16(%rdi),%xmm11
pxor %xmm0,%xmm10
movdqu 32(%rdi),%xmm12
pxor %xmm0,%xmm11
movdqu 48(%rdi),%xmm13
pxor %xmm0,%xmm12
movdqu 64(%rdi),%xmm14
pxor %xmm0,%xmm13
movdqu 80(%rdi),%xmm15
pxor %xmm0,%xmm14
prefetcht0 448(%rdi)
prefetcht0 512(%rdi)
pxor %xmm0,%xmm15
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movdqu 96(%rdi),%xmm1
leaq 128(%rdi),%rdi
.byte 102,65,15,56,221,210
pxor %xmm0,%xmm1
movdqu 112-128(%rdi),%xmm10
.byte 102,65,15,56,221,219
pxor %xmm0,%xmm10
movdqa 0(%rsp),%xmm11
.byte 102,65,15,56,221,228
.byte 102,65,15,56,221,237
movdqa 16(%rsp),%xmm12
movdqa 32(%rsp),%xmm13
.byte 102,65,15,56,221,246
.byte 102,65,15,56,221,255
movdqa 48(%rsp),%xmm14
movdqa 64(%rsp),%xmm15
.byte 102,68,15,56,221,193
movdqa 80(%rsp),%xmm0
movups 16-128(%rcx),%xmm1
.byte 102,69,15,56,221,202
movups %xmm2,(%rsi)
movdqa %xmm11,%xmm2
movups %xmm3,16(%rsi)
movdqa %xmm12,%xmm3
movups %xmm4,32(%rsi)
movdqa %xmm13,%xmm4
movups %xmm5,48(%rsi)
movdqa %xmm14,%xmm5
movups %xmm6,64(%rsi)
movdqa %xmm15,%xmm6
movups %xmm7,80(%rsi)
movdqa %xmm0,%xmm7
movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi
subq $8,%rdx
jnc L$ctr32_loop8
addq $8,%rdx
jz L$ctr32_done
leaq -128(%rcx),%rcx
L$ctr32_tail:
leaq 16(%rcx),%rcx
cmpq $4,%rdx
jb L$ctr32_loop3
je L$ctr32_loop4
shll $4,%eax
movdqa 96(%rsp),%xmm8
pxor %xmm9,%xmm9
movups 16(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
leaq 32-16(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,225
addq $16,%rax
movups (%rdi),%xmm10
.byte 102,15,56,220,233
.byte 102,15,56,220,241
movups 16(%rdi),%xmm11
movups 32(%rdi),%xmm12
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
call L$enc_loop8_enter
movdqu 48(%rdi),%xmm13
pxor %xmm10,%xmm2
movdqu 64(%rdi),%xmm10
pxor %xmm11,%xmm3
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
pxor %xmm10,%xmm6
movdqu %xmm5,48(%rsi)
movdqu %xmm6,64(%rsi)
cmpq $6,%rdx
jb L$ctr32_done
movups 80(%rdi),%xmm11
xorps %xmm11,%xmm7
movups %xmm7,80(%rsi)
je L$ctr32_done
movups 96(%rdi),%xmm12
xorps %xmm12,%xmm8
movups %xmm8,96(%rsi)
jmp L$ctr32_done
.p2align 5
L$ctr32_loop4:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx),%xmm1
jnz L$ctr32_loop4
.byte 102,15,56,221,209
.byte 102,15,56,221,217
movups (%rdi),%xmm10
movups 16(%rdi),%xmm11
.byte 102,15,56,221,225
.byte 102,15,56,221,233
movups 32(%rdi),%xmm12
movups 48(%rdi),%xmm13
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm4,32(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm5,48(%rsi)
jmp L$ctr32_done
.p2align 5
L$ctr32_loop3:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx),%xmm1
jnz L$ctr32_loop3
.byte 102,15,56,221,209
.byte 102,15,56,221,217
.byte 102,15,56,221,225
movups (%rdi),%xmm10
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
cmpq $2,%rdx
jb L$ctr32_done
movups 16(%rdi),%xmm11
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
je L$ctr32_done
movups 32(%rdi),%xmm12
xorps %xmm12,%xmm4
movups %xmm4,32(%rsi)
L$ctr32_done:
xorps %xmm0,%xmm0
xorl %ebp,%ebp
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movaps %xmm0,0(%rsp)
pxor %xmm8,%xmm8
movaps %xmm0,16(%rsp)
pxor %xmm9,%xmm9
movaps %xmm0,32(%rsp)
pxor %xmm10,%xmm10
movaps %xmm0,48(%rsp)
pxor %xmm11,%xmm11
movaps %xmm0,64(%rsp)
pxor %xmm12,%xmm12
movaps %xmm0,80(%rsp)
pxor %xmm13,%xmm13
movaps %xmm0,96(%rsp)
pxor %xmm14,%xmm14
movaps %xmm0,112(%rsp)
pxor %xmm15,%xmm15
movq -8(%r11),%rbp
leaq (%r11),%rsp
L$ctr32_epilogue:
ret
.globl _aes_hw_set_encrypt_key_base
.private_extern _aes_hw_set_encrypt_key_base
.p2align 4
_aes_hw_set_encrypt_key_base:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je L$14rounds
cmpl $128,%esi
jne L$bad_keybits
L$10rounds:
movl $9,%esi
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call L$key_expansion_128_cold
.byte 102,15,58,223,200,2
call L$key_expansion_128
.byte 102,15,58,223,200,4
call L$key_expansion_128
.byte 102,15,58,223,200,8
call L$key_expansion_128
.byte 102,15,58,223,200,16
call L$key_expansion_128
.byte 102,15,58,223,200,32
call L$key_expansion_128
.byte 102,15,58,223,200,64
call L$key_expansion_128
.byte 102,15,58,223,200,128
call L$key_expansion_128
.byte 102,15,58,223,200,27
call L$key_expansion_128
.byte 102,15,58,223,200,54
call L$key_expansion_128
movups %xmm0,(%rax)
movl %esi,80(%rax)
xorl %eax,%eax
jmp L$enc_key_ret
.p2align 4
L$14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
call L$key_expansion_256a_cold
.byte 102,15,58,223,200,1
call L$key_expansion_256b
.byte 102,15,58,223,202,2
call L$key_expansion_256a
.byte 102,15,58,223,200,2
call L$key_expansion_256b
.byte 102,15,58,223,202,4
call L$key_expansion_256a
.byte 102,15,58,223,200,4
call L$key_expansion_256b
.byte 102,15,58,223,202,8
call L$key_expansion_256a
.byte 102,15,58,223,200,8
call L$key_expansion_256b
.byte 102,15,58,223,202,16
call L$key_expansion_256a
.byte 102,15,58,223,200,16
call L$key_expansion_256b
.byte 102,15,58,223,202,32
call L$key_expansion_256a
.byte 102,15,58,223,200,32
call L$key_expansion_256b
.byte 102,15,58,223,202,64
call L$key_expansion_256a
movups %xmm0,(%rax)
movl %esi,16(%rax)
xorq %rax,%rax
jmp L$enc_key_ret
.p2align 4
L$bad_keybits:
movq $-2,%rax
L$enc_key_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
ret
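// Key-schedule helper: the shufps/xorps cascade xors the previous round
// key with shifted copies of itself, then xors in the rotated S-box word
// that aeskeygenassist (the .byte 102,15,58,223 sequences) returned in
// %xmm1, broadcast via shufps $255.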
.p2align 4
L$key_expansion_128:
movups %xmm0,(%rax)
leaq 16(%rax),%rax
L$key_expansion_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.p2align 4
L$key_expansion_256a:
movups %xmm2,(%rax)
leaq 16(%rax),%rax
L$key_expansion_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.p2align 4
L$key_expansion_256b:
movups %xmm0,(%rax)
leaq 16(%rax),%rax
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.globl _aes_hw_set_encrypt_key_alt
.private_extern _aes_hw_set_encrypt_key_alt
.p2align 4
_aes_hw_set_encrypt_key_alt:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je L$14rounds_alt
cmpl $128,%esi
jne L$bad_keybits_alt
movl $9,%esi
movdqa L$key_rotate(%rip),%xmm5
movl $8,%r10d
movdqa L$key_rcon1(%rip),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,(%rdx)
jmp L$oop_key128
.p2align 4
L$oop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leaq 16(%rax),%rax
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%rax)
movdqa %xmm0,%xmm2
decl %r10d
jnz L$oop_key128
movdqa L$key_rcon1b(%rip),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%rax)
movl %esi,96(%rax)
xorl %eax,%eax
jmp L$enc_key_ret_alt
.p2align 4
L$14rounds_alt:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movdqa L$key_rotate(%rip),%xmm5
movdqa L$key_rcon1(%rip),%xmm4
movl $7,%r10d
movdqu %xmm0,0(%rdx)
movdqa %xmm2,%xmm1
movdqu %xmm2,16(%rdx)
jmp L$oop_key256
.p2align 4
L$oop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
decl %r10d
jz L$done_key256
pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%rax)
leaq 32(%rax),%rax
movdqa %xmm2,%xmm1
jmp L$oop_key256
L$done_key256:
movl %esi,16(%rax)
xorl %eax,%eax
jmp L$enc_key_ret_alt
.p2align 4
L$bad_keybits_alt:
movq $-2,%rax
L$enc_key_ret_alt:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
ret
.section __DATA,__const
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
L$increment32:
.long 6,6,6,0
L$increment64:
.long 1,0,0,0
L$increment1:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
L$key_rotate:
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
L$key_rotate192:
.long 0x04070605,0x04070605,0x04070605,0x04070605
L$key_rcon1:
.long 1,1,1,1
L$key_rcon1b:
.long 0x1b,0x1b,0x1b,0x1b
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 // "AES for Intel AES-NI, CRYPTOGAMS by <appro@openssl.org>"
.p2align 6
.text
#endif
|
mktmansour/MKT-KSA-Geolocation-Security | 51,084 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/x86_64-mont5-elf.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl bn_mul4x_mont_gather5
.hidden bn_mul4x_mont_gather5
.type bn_mul4x_mont_gather5,@function
.align 32
bn_mul4x_mont_gather5:
.cfi_startproc
_CET_ENDBR
.byte 0x67
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lmul4x_prologue:
.byte 0x67
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lmul4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp .Lmul4xsp_done
.align 32
.Lmul4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
.Lmul4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmul4x_page_walk
jmp .Lmul4x_page_walk_done
.Lmul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmul4x_page_walk
.Lmul4x_page_walk_done:
negq %r9
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
.Lmul4x_body:
call mul4x_internal
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmul4x_epilogue:
ret
.cfi_endproc
.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
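// Inner worker. The 7th caller argument (the table index, loaded into
// %xmm5 from 8(%rax)) is compared via PCMPEQD against counters built from
// .Linc; the resulting all-ones/all-zeros masks drive a PAND/POR gather
// that reads every table line, so memory access never depends on the index.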
.type mul4x_internal,@function
.align 32
mul4x_internal:
.cfi_startproc
shlq $5,%r9
movd 8(%rax),%xmm5
leaq .Linc(%rip),%rax
leaq 128(%rdx,%r9,1),%r13
shrq $5,%r9
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r9,1),%r10
leaq 128(%rdx),%r12
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67,0x67
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
.byte 0x67
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
.byte 0x67
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%r12),%xmm0
pand 80(%r12),%xmm1
pand 96(%r12),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%r12),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%r12),%xmm4
movdqa -112(%r12),%xmm5
movdqa -96(%r12),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%r12),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%r12),%xmm4
movdqa -48(%r12),%xmm5
movdqa -32(%r12),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%r12),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%r12),%xmm4
movdqa 16(%r12),%xmm5
movdqa 32(%r12),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%r12),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
por %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq %r13,16+8(%rsp)
movq %rdi,56+8(%rsp)
movq (%r8),%r8
movq (%rsi),%rax
leaq (%rsi,%r9,1),%rsi
negq %r9
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
leaq 64+8(%rsp),%r14
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
jmp .L1st4x
.align 32
.L1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
addq $32,%r15
jnz .L1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%r14)
jmp .Louter4x
.align 32
.Louter4x:
leaq 16+128(%r14),%rdx
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r12),%xmm0
movdqa -112(%r12),%xmm1
movdqa -96(%r12),%xmm2
movdqa -80(%r12),%xmm3
pand -128(%rdx),%xmm0
pand -112(%rdx),%xmm1
por %xmm0,%xmm4
pand -96(%rdx),%xmm2
por %xmm1,%xmm5
pand -80(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r12),%xmm0
movdqa -48(%r12),%xmm1
movdqa -32(%r12),%xmm2
movdqa -16(%r12),%xmm3
pand -64(%rdx),%xmm0
pand -48(%rdx),%xmm1
por %xmm0,%xmm4
pand -32(%rdx),%xmm2
por %xmm1,%xmm5
pand -16(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r12),%xmm0
movdqa 16(%r12),%xmm1
movdqa 32(%r12),%xmm2
movdqa 48(%r12),%xmm3
pand 0(%rdx),%xmm0
pand 16(%rdx),%xmm1
por %xmm0,%xmm4
pand 32(%rdx),%xmm2
por %xmm1,%xmm5
pand 48(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r12),%xmm0
movdqa 80(%r12),%xmm1
movdqa 96(%r12),%xmm2
movdqa 112(%r12),%xmm3
pand 64(%rdx),%xmm0
pand 80(%rdx),%xmm1
por %xmm0,%xmm4
pand 96(%rdx),%xmm2
por %xmm1,%xmm5
pand 112(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq (%r14,%r9,1),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
movq %rdi,(%r14)
leaq (%r14,%r9,1),%r14
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdx,%r13
jmp .Linner4x
.align 32
.Linner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
addq (%r14),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%r13
addq $32,%r15
jnz .Linner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq %rbp,%rax
movq -8(%rcx),%rbp
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
movq %rdi,-16(%r14)
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%r14),%r13
adcq $0,%rdi
movq %r13,-8(%r14)
cmpq 16+8(%rsp),%r12
jb .Louter4x
xorq %rax,%rax
subq %r13,%rbp
adcq %r15,%r15
orq %r15,%rdi
subq %rdi,%rax
leaq (%r14,%r9,1),%rbx
movq (%rcx),%r12
leaq (%rcx),%rbp
movq %r9,%rcx
sarq $3+2,%rcx
movq 56+8(%rsp),%rdi
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp .Lsqr4x_sub_entry
.cfi_endproc
.size mul4x_internal,.-mul4x_internal
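// One step of fixed 5-bit-window modular exponentiation: five Montgomery
// squarings (__bn_sqr8x_internal + __bn_post4x_internal) followed by one
// Montgomery multiplication by a gathered table entry, i.e.
// acc <- acc^(2^5) * tbl[power] in Montgomery form.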
.globl bn_power5_nohw
.hidden bn_power5_nohw
.type bn_power5_nohw,@function
.align 32
bn_power5_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lpower5_prologue:
shll $3,%r9d
leal (%r9,%r9,2),%r10d
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lpwr_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp .Lpwr_sp_done
.align 32
.Lpwr_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
.Lpwr_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lpwr_page_walk
jmp .Lpwr_page_walk_done
.Lpwr_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lpwr_page_walk
.Lpwr_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
.Lpower5_body:
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq %rsi,%rdi
movq 40(%rsp),%rax
leaq 32(%rsp),%r8
call mul4x_internal
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpower5_epilogue:
ret
.cfi_endproc
.size bn_power5_nohw,.-bn_power5_nohw
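// Full-width squaring, 8 limbs per pass: off-diagonal products first, then
// the .Lsqr4x_shift_n_add loop doubles them while folding in the diagonal
// squares, and control falls through into __bn_sqr8x_reduction.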
.globl bn_sqr8x_internal
.hidden bn_sqr8x_internal
.hidden bn_sqr8x_internal
.type bn_sqr8x_internal,@function
.align 32
bn_sqr8x_internal:
__bn_sqr8x_internal:
.cfi_startproc
_CET_ENDBR
leaq 32(%r10),%rbp
leaq (%rsi,%r9,1),%rsi
movq %r9,%rcx
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
movq %r10,-24(%rdi,%rbp,1)
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
movq %r11,-16(%rdi,%rbp,1)
movq %rdx,%r10
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
movq %rax,%r12
movq %rbx,%rax
movq %rdx,%r13
leaq (%rbp),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
jmp .Lsqr4x_1st
.align 32
.Lsqr4x_1st:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq 16(%rsi,%rcx,1),%rbx
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %r10,8(%rdi,%rcx,1)
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 24(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,16(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
leaq 32(%rcx),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne .Lsqr4x_1st
mulq %r15
addq %rax,%r13
leaq 16(%rbp),%rbp
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
jmp .Lsqr4x_outer
.align 32
.Lsqr4x_outer:
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq -24(%rdi,%rbp,1),%r10
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
movq %r10,-24(%rdi,%rbp,1)
movq %rdx,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
addq -16(%rdi,%rbp,1),%r11
movq %rdx,%r10
adcq $0,%r10
movq %r11,-16(%rdi,%rbp,1)
xorq %r12,%r12
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
addq %rax,%r12
movq %rbx,%rax
adcq $0,%rdx
addq -8(%rdi,%rbp,1),%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rbp,1)
leaq (%rbp),%rcx
jmp .Lsqr4x_inner
.align 32
.Lsqr4x_inner:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
addq (%rdi,%rcx,1),%r13
adcq $0,%r12
.byte 0x67
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %r11,(%rdi,%rcx,1)
movq %rbx,%rax
movq %rdx,%r13
adcq $0,%r13
addq 8(%rdi,%rcx,1),%r12
leaq 16(%rcx),%rcx
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne .Lsqr4x_inner
.byte 0x67
mulq %r15
addq %rax,%r13
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
addq $16,%rbp
jnz .Lsqr4x_outer
movq -32(%rsi),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi),%rbx
movq %rax,%r15
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq %r10,-24(%rdi)
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
movq -8(%rsi),%rbx
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,-16(%rdi)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi)
mulq %r15
addq %rax,%r13
movq -16(%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
mulq %rbx
addq $16,%rbp
xorq %r14,%r14
subq %r9,%rbp
xorq %r15,%r15
addq %r12,%rax
adcq $0,%rdx
movq %rax,8(%rdi)
movq %rdx,16(%rdi)
movq %r15,24(%rdi)
movq -16(%rsi,%rbp,1),%rax
leaq 48+8(%rsp),%rdi
xorq %r10,%r10
movq 8(%rdi),%r11
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
leaq 16(%rbp),%rbp
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
jmp .Lsqr4x_shift_n_add
.align 32
.Lsqr4x_shift_n_add:
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 0(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 8(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,-16(%rdi)
adcq %rdx,%r8
leaq (%r14,%r10,2),%r12
movq %r8,-8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq 8(%rsi,%rbp,1),%rax
movq %r12,0(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 16(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
addq $32,%rbp
jnz .Lsqr4x_shift_n_add
leaq (%r14,%r10,2),%r12
.byte 0x67
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
mulq %rax
negq %r15
adcq %rax,%rbx
adcq %rdx,%r8
movq %rbx,-16(%rdi)
movq %r8,-8(%rdi)
.byte 102,72,15,126,213
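// Montgomery reduction of the double-width square, 8 limbs per iteration.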
__bn_sqr8x_reduction:
xorq %rax,%rax
leaq (%r9,%rbp,1),%rcx
leaq 48+8(%rsp,%r9,2),%rdx
movq %rcx,0+8(%rsp)
leaq 48+8(%rsp,%r9,1),%rdi
movq %rdx,8+8(%rsp)
negq %r9
jmp .L8x_reduction_loop
.align 32
.L8x_reduction_loop:
leaq (%rdi,%r9,1),%rdi
.byte 0x66
movq 0(%rdi),%rbx
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,(%rdx)
leaq 64(%rdi),%rdi
.byte 0x67
movq %rbx,%r8
imulq 32+8(%rsp),%rbx
movq 0(%rbp),%rax
movl $8,%ecx
jmp .L8x_reduce
.align 32
.L8x_reduce:
mulq %rbx
movq 8(%rbp),%rax
negq %r8
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
movq %rbx,48-8+8(%rsp,%rcx,8)
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq 32+8(%rsp),%rsi
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
imulq %r8,%rsi
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq %rsi,%rbx
addq %rax,%r15
movq 0(%rbp),%rax
adcq $0,%rdx
addq %r15,%r14
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz .L8x_reduce
leaq 64(%rbp),%rbp
xorq %rax,%rax
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae .L8x_no_tail
.byte 0x66
addq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movq 48+56+8(%rsp),%rbx
movl $8,%ecx
movq 0(%rbp),%rax
jmp .L8x_tail
.align 32
.L8x_tail:
mulq %rbx
addq %rax,%r8
movq 8(%rbp),%rax
movq %r8,(%rdi)
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
leaq 8(%rdi),%rdi
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq 48-16+8(%rsp,%rcx,8),%rbx
addq %rax,%r15
adcq $0,%rdx
addq %r15,%r14
movq 0(%rbp),%rax
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz .L8x_tail
leaq 64(%rbp),%rbp
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae .L8x_tail_done
movq 48+56+8(%rsp),%rbx
negq %rsi
movq 0(%rbp),%rax
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movl $8,%ecx
jmp .L8x_tail
.align 32
.L8x_tail_done:
xorq %rax,%rax
addq (%rdx),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
negq %rsi
.L8x_no_tail:
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq -8(%rbp),%rcx
xorq %rsi,%rsi
.byte 102,72,15,126,213
movq %r8,0(%rdi)
movq %r9,8(%rdi)
.byte 102,73,15,126,217
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi),%rdi
cmpq %rdx,%rdi
jb .L8x_reduction_loop
ret
.cfi_endproc
.size bn_sqr8x_internal,.-bn_sqr8x_internal
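// Branch-free final reduction: the modulus is conditionally subtracted via
// NOT/AND with a borrow-derived mask and an ADC chain, instead of a
// data-dependent branch.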
.type __bn_post4x_internal,@function
.align 32
__bn_post4x_internal:
.cfi_startproc
movq 0(%rbp),%r12
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
.byte 102,72,15,126,207
negq %rax
.byte 102,72,15,126,206
sarq $3+2,%rcx
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp .Lsqr4x_sub_entry
.align 16
.Lsqr4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
.Lsqr4x_sub_entry:
leaq 32(%rbp),%rbp
notq %r12
notq %r13
notq %r14
notq %r15
andq %rax,%r12
andq %rax,%r13
andq %rax,%r14
andq %rax,%r15
negq %r10
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
adcq 16(%rbx),%r14
adcq 24(%rbx),%r15
movq %r12,0(%rdi)
leaq 32(%rbx),%rbx
movq %r13,8(%rdi)
sbbq %r10,%r10
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz .Lsqr4x_sub
movq %r9,%r10
negq %r9
ret
.cfi_endproc
.size __bn_post4x_internal,.-__bn_post4x_internal
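// BMI2/ADX variant of bn_mul4x_mont_gather5: MULX plus the ADCX/ADOX pair
// keeps two independent carry chains in flight per loop iteration.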
.globl bn_mulx4x_mont_gather5
.hidden bn_mulx4x_mont_gather5
.type bn_mulx4x_mont_gather5,@function
.align 32
bn_mulx4x_mont_gather5:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lmulx4x_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lmulx4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp .Lmulx4xsp_done
.Lmulx4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
.Lmulx4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
jmp .Lmulx4x_page_walk_done
.Lmulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lmulx4x_page_walk
.Lmulx4x_page_walk_done:
movq %r8,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
.Lmulx4x_body:
call mulx4x_internal
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lmulx4x_epilogue:
ret
.cfi_endproc
.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
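// MULX/ADX inner worker; the gather-mask construction below mirrors
// mul4x_internal.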
.type mulx4x_internal,@function
.align 32
mulx4x_internal:
.cfi_startproc
movq %r9,8(%rsp)
movq %r9,%r10
negq %r9
shlq $5,%r9
negq %r10
leaq 128(%rdx,%r9,1),%r13
shrq $5+5,%r9
movd 8(%rax),%xmm5
subq $1,%r9
leaq .Linc(%rip),%rax
movq %r13,16+8(%rsp)
movq %r9,24+8(%rsp)
movq %rdi,56+8(%rsp)
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r10,1),%r10
leaq 128(%rdx),%rdi
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67
movdqa %xmm1,%xmm2
.byte 0x67
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
.byte 0x67
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%rdi),%xmm0
pand 80(%rdi),%xmm1
pand 96(%rdi),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%rdi),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%rdi),%xmm4
movdqa -112(%rdi),%xmm5
movdqa -96(%rdi),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%rdi),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%rdi),%xmm4
movdqa -48(%rdi),%xmm5
movdqa -32(%rdi),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%rdi),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%rdi),%xmm4
movdqa 16(%rdi),%xmm5
movdqa 32(%rdi),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%rdi),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
pxor %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
leaq 64+32+8(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r12
addq %rax,%r11
mulxq 16(%rsi),%rax,%r13
adcq %rax,%r12
adcq $0,%r13
mulxq 24(%rsi),%rax,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
xorq %rbp,%rbp
movq %r8,%rdx
movq %rdi,8+8(%rsp)
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp .Lmulx4x_1st
.align 32
.Lmulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_1st
movq 8(%rsp),%rax
adcq %rbp,%r15
leaq (%rsi,%rax,1),%rsi
addq %r15,%r14
movq 8+8(%rsp),%rdi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
jmp .Lmulx4x_outer
.align 32
.Lmulx4x_outer:
leaq 16-256(%rbx),%r10
pxor %xmm4,%xmm4
.byte 0x67,0x67
pxor %xmm5,%xmm5
movdqa -128(%rdi),%xmm0
movdqa -112(%rdi),%xmm1
movdqa -96(%rdi),%xmm2
pand 256(%r10),%xmm0
movdqa -80(%rdi),%xmm3
pand 272(%r10),%xmm1
por %xmm0,%xmm4
pand 288(%r10),%xmm2
por %xmm1,%xmm5
pand 304(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%rdi),%xmm0
movdqa -48(%rdi),%xmm1
movdqa -32(%rdi),%xmm2
pand 320(%r10),%xmm0
movdqa -16(%rdi),%xmm3
pand 336(%r10),%xmm1
por %xmm0,%xmm4
pand 352(%r10),%xmm2
por %xmm1,%xmm5
pand 368(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%rdi),%xmm0
movdqa 16(%rdi),%xmm1
movdqa 32(%rdi),%xmm2
pand 384(%r10),%xmm0
movdqa 48(%rdi),%xmm3
pand 400(%r10),%xmm1
por %xmm0,%xmm4
pand 416(%r10),%xmm2
por %xmm1,%xmm5
pand 432(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%rdi),%xmm0
movdqa 80(%rdi),%xmm1
movdqa 96(%rdi),%xmm2
pand 448(%r10),%xmm0
movdqa 112(%rdi),%xmm3
pand 464(%r10),%xmm1
por %xmm0,%xmm4
pand 480(%r10),%xmm2
por %xmm1,%xmm5
pand 496(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
movq %rbp,(%rbx)
leaq 32(%rbx,%rax,1),%rbx
mulxq 0(%rsi),%r8,%r11
xorq %rbp,%rbp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
mulxq 24(%rsi),%rdx,%r14
adoxq -16(%rbx),%r12
adcxq %rdx,%r13
leaq (%rcx,%rax,1),%rcx
leaq 32(%rsi),%rsi
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
adoxq %rbp,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
movq %r8,%rdx
xorq %rbp,%rbp
movq %rdi,8+8(%rsp)
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r12
movq %r11,-24(%rbx)
adoxq %rbp,%r15
movq %r12,-16(%rbx)
leaq 32(%rcx),%rcx
jmp .Lmulx4x_inner
.align 32
.Lmulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
movq %r11,-32(%rbx)
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
leaq 32(%rcx),%rcx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
movq %r13,-16(%rbx)
decq %rdi
jnz .Lmulx4x_inner
movq 0+8(%rsp),%rax
adcq %rbp,%r15
subq 0(%rbx),%rdi
movq 8+8(%rsp),%rdi
movq 16+8(%rsp),%r10
adcq %r15,%r14
leaq (%rsi,%rax,1),%rsi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
cmpq %r10,%rdi
jb .Lmulx4x_outer
movq -8(%rcx),%r10
movq %rbp,%r8
movq (%rcx,%rax,1),%r12
leaq (%rcx,%rax,1),%rbp
movq %rax,%rcx
leaq (%rbx,%rax,1),%rdi
xorl %eax,%eax
xorq %r15,%r15
subq %r14,%r10
adcq %r15,%r15
orq %r15,%r8
sarq $3+2,%rcx
subq %r8,%rax
movq 56+8(%rsp),%rdx
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp .Lsqrx4x_sub_entry
.cfi_endproc
.size mulx4x_internal,.-mulx4x_internal
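// MULX/ADX counterpart of bn_power5_nohw: five squarings, then one
// multiplication by the gathered table entry.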
.globl bn_powerx5
.hidden bn_powerx5
.type bn_powerx5,@function
.align 32
bn_powerx5:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
.Lpowerx5_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb .Lpwrx_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp .Lpwrx_sp_done
.align 32
.Lpwrx_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
.Lpwrx_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lpwrx_page_walk
jmp .Lpwrx_page_walk_done
.Lpwrx_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja .Lpwrx_page_walk
.Lpwrx_page_walk_done:
movq %r9,%r10
negq %r9
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
movq %r8,32(%rsp)
movq %rax,40(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08
.Lpowerx5_body:
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
movq %r10,%r9
movq %rsi,%rdi
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq 40(%rsp),%rax
call mulx4x_internal
movq 40(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq $1,%rax
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpowerx5_epilogue:
ret
.cfi_endproc
.size bn_powerx5,.-bn_powerx5
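// MULX/ADX counterpart of bn_sqr8x_internal; falls through into
// __bn_sqrx8x_reduction for the Montgomery reduction.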
.globl bn_sqrx8x_internal
.hidden bn_sqrx8x_internal
.hidden bn_sqrx8x_internal
.type bn_sqrx8x_internal,@function
.align 32
bn_sqrx8x_internal:
__bn_sqrx8x_internal:
.cfi_startproc
_CET_ENDBR
leaq 48+8(%rsp),%rdi
leaq (%rsi,%r9,1),%rbp
movq %r9,0+8(%rsp)
movq %rbp,8+8(%rsp)
jmp .Lsqr8x_zero_start
.align 32
.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
.Lsqrx8x_zero:
.byte 0x3e
movdqa %xmm0,0(%rdi)
movdqa %xmm0,16(%rdi)
movdqa %xmm0,32(%rdi)
movdqa %xmm0,48(%rdi)
.Lsqr8x_zero_start:
movdqa %xmm0,64(%rdi)
movdqa %xmm0,80(%rdi)
movdqa %xmm0,96(%rdi)
movdqa %xmm0,112(%rdi)
leaq 128(%rdi),%rdi
subq $64,%r9
jnz .Lsqrx8x_zero
movq 0(%rsi),%rdx
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
leaq 48+8(%rsp),%rdi
xorq %rbp,%rbp
jmp .Lsqrx8x_outer_loop
.align 32
.Lsqrx8x_outer_loop:
mulxq 8(%rsi),%r8,%rax
adcxq %r9,%r8
adoxq %rax,%r10
mulxq 16(%rsi),%r9,%rax
adcxq %r10,%r9
adoxq %rax,%r11
.byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00
adcxq %r11,%r10
adoxq %rax,%r12
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00
adcxq %r12,%r11
adoxq %rax,%r13
mulxq 40(%rsi),%r12,%rax
adcxq %r13,%r12
adoxq %rax,%r14
mulxq 48(%rsi),%r13,%rax
adcxq %r14,%r13
adoxq %r15,%rax
mulxq 56(%rsi),%r14,%r15
movq 8(%rsi),%rdx
adcxq %rax,%r14
adoxq %rbp,%r15
adcq 64(%rdi),%r15
movq %r8,8(%rdi)
movq %r9,16(%rdi)
sbbq %rcx,%rcx
xorq %rbp,%rbp
mulxq 16(%rsi),%r8,%rbx
mulxq 24(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 32(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %rbx,%r11
.byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00
adcxq %r13,%r11
adoxq %r14,%r12
.byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00
movq 16(%rsi),%rdx
adcxq %rax,%r12
adoxq %rbx,%r13
adcxq %r15,%r13
adoxq %rbp,%r14
adcxq %rbp,%r14
movq %r8,24(%rdi)
movq %r9,32(%rdi)
mulxq 24(%rsi),%r8,%rbx
mulxq 32(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 40(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %r13,%r11
.byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00
.byte 0x3e
movq 24(%rsi),%rdx
adcxq %rbx,%r11
adoxq %rax,%r12
adcxq %r14,%r12
movq %r8,40(%rdi)
movq %r9,48(%rdi)
mulxq 32(%rsi),%r8,%rax
adoxq %rbp,%r13
adcxq %rbp,%r13
mulxq 40(%rsi),%r9,%rbx
adcxq %r10,%r8
adoxq %rax,%r9
mulxq 48(%rsi),%r10,%rax
adcxq %r11,%r9
adoxq %r12,%r10
mulxq 56(%rsi),%r11,%r12
movq 32(%rsi),%rdx
movq 40(%rsi),%r14
adcxq %rbx,%r10
adoxq %rax,%r11
movq 48(%rsi),%r15
adcxq %r13,%r11
adoxq %rbp,%r12
adcxq %rbp,%r12
movq %r8,56(%rdi)
movq %r9,64(%rdi)
mulxq %r14,%r9,%rax
movq 56(%rsi),%r8
adcxq %r10,%r9
mulxq %r15,%r10,%rbx
adoxq %rax,%r10
adcxq %r11,%r10
mulxq %r8,%r11,%rax
movq %r14,%rdx
adoxq %rbx,%r11
adcxq %r12,%r11
adcxq %rbp,%rax
mulxq %r15,%r14,%rbx
mulxq %r8,%r12,%r13
movq %r15,%rdx
leaq 64(%rsi),%rsi
adcxq %r14,%r11
adoxq %rbx,%r12
adcxq %rax,%r12
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %r8,%r8,%r14
adcxq %r8,%r13
adcxq %rbp,%r14
cmpq 8+8(%rsp),%rsi
je .Lsqrx8x_outer_break
negq %rcx
movq $-8,%rcx
movq %rbp,%r15
movq 64(%rdi),%r8
adcxq 72(%rdi),%r9
adcxq 80(%rdi),%r10
adcxq 88(%rdi),%r11
adcq 96(%rdi),%r12
adcq 104(%rdi),%r13
adcq 112(%rdi),%r14
adcq 120(%rdi),%r15
leaq (%rsi),%rbp
leaq 128(%rdi),%rdi
sbbq %rax,%rax
movq -64(%rsi),%rdx
movq %rax,16+8(%rsp)
movq %rdi,24+8(%rsp)
xorl %eax,%eax
jmp .Lsqrx8x_loop
.align 32
.Lsqrx8x_loop:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
movq %rbx,(%rdi,%rcx,8)
movl $0,%ebx
adcxq %rax,%r13
adoxq %r15,%r14
.byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00
movq 8(%rsi,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rbx,%r15
adcxq %rbx,%r15
.byte 0x67
incq %rcx
jnz .Lsqrx8x_loop
leaq 64(%rbp),%rbp
movq $-8,%rcx
cmpq 8+8(%rsp),%rbp
je .Lsqrx8x_break
subq 16+8(%rsp),%rbx
.byte 0x66
movq -64(%rsi),%rdx
adcxq 0(%rdi),%r8
adcxq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
.byte 0x67
sbbq %rax,%rax
xorl %ebx,%ebx
movq %rax,16+8(%rsp)
jmp .Lsqrx8x_loop
.align 32
.Lsqrx8x_break:
xorq %rbp,%rbp
subq 16+8(%rsp),%rbx
adcxq %rbp,%r8
movq 24+8(%rsp),%rcx
adcxq %rbp,%r9
movq 0(%rsi),%rdx
adcq $0,%r10
movq %r8,0(%rdi)
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
cmpq %rcx,%rdi
je .Lsqrx8x_outer_loop
movq %r9,8(%rdi)
movq 8(%rcx),%r9
movq %r10,16(%rdi)
movq 16(%rcx),%r10
movq %r11,24(%rdi)
movq 24(%rcx),%r11
movq %r12,32(%rdi)
movq 32(%rcx),%r12
movq %r13,40(%rdi)
movq 40(%rcx),%r13
movq %r14,48(%rdi)
movq 48(%rcx),%r14
movq %r15,56(%rdi)
movq 56(%rcx),%r15
movq %rcx,%rdi
jmp .Lsqrx8x_outer_loop
.align 32
.Lsqrx8x_outer_break:
movq %r9,72(%rdi)
.byte 102,72,15,126,217
movq %r10,80(%rdi)
movq %r11,88(%rdi)
movq %r12,96(%rdi)
movq %r13,104(%rdi)
movq %r14,112(%rdi)
leaq 48+8(%rsp),%rdi
movq (%rsi,%rcx,1),%rdx
movq 8(%rdi),%r11
xorq %r10,%r10
movq 0+8(%rsp),%r9
adoxq %r11,%r11
movq 16(%rdi),%r12
movq 24(%rdi),%r13
.align 32
.Lsqrx4x_shift_n_add:
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
.byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00
.byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00
adoxq %r13,%r13
adcxq %r11,%rbx
movq 40(%rdi),%r11
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
movq 16(%rsi,%rcx,1),%rdx
movq 48(%rdi),%r12
adoxq %r11,%r11
adcxq %r13,%rbx
movq 56(%rdi),%r13
movq %rax,16(%rdi)
movq %rbx,24(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
movq 24(%rsi,%rcx,1),%rdx
leaq 32(%rcx),%rcx
movq 64(%rdi),%r10
adoxq %r13,%r13
adcxq %r11,%rbx
movq 72(%rdi),%r11
movq %rax,32(%rdi)
movq %rbx,40(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
jrcxz .Lsqrx4x_shift_n_add_break
.byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00
adoxq %r11,%r11
adcxq %r13,%rbx
movq 80(%rdi),%r12
movq 88(%rdi),%r13
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
nop
jmp .Lsqrx4x_shift_n_add
.align 32
.Lsqrx4x_shift_n_add_break:
adcxq %r13,%rbx
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
.byte 102,72,15,126,213
__bn_sqrx8x_reduction:
xorl %eax,%eax
movq 32+8(%rsp),%rbx
movq 48+8(%rsp),%rdx
leaq -64(%rbp,%r9,1),%rcx
movq %rcx,0+8(%rsp)
movq %rdi,8+8(%rsp)
leaq 48+8(%rsp),%rdi
jmp .Lsqrx8x_reduction_loop
.align 32
.Lsqrx8x_reduction_loop:
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq %rdx,%r8
imulq %rbx,%rdx
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,24+8(%rsp)
leaq 64(%rdi),%rdi
xorq %rsi,%rsi
movq $-8,%rcx
jmp .Lsqrx8x_reduce
.align 32
.Lsqrx8x_reduce:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rbx,%rax
adoxq %r9,%r8
mulxq 8(%rbp),%rbx,%r9
adcxq %rbx,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rbx,%r10
adcxq %rbx,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rbx,%r11
adcxq %rbx,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00
movq %rdx,%rax
movq %r8,%rdx
adcxq %rbx,%r11
adoxq %r13,%r12
mulxq 32+8(%rsp),%rbx,%rdx
movq %rax,%rdx
movq %rax,64+48+8(%rsp,%rcx,8)
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq %rbx,%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
adcxq %rsi,%r15
.byte 0x67,0x67,0x67
incq %rcx
jnz .Lsqrx8x_reduce
movq %rsi,%rax
cmpq 0+8(%rsp),%rbp
jae .Lsqrx8x_no_tail
movq 48+8(%rsp),%rdx
addq 0(%rdi),%r8
leaq 64(%rbp),%rbp
movq $-8,%rcx
adcxq 8(%rdi),%r9
adcxq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp .Lsqrx8x_tail
.align 32
.Lsqrx8x_tail:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq 72+48+8(%rsp,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
movq %rbx,(%rdi,%rcx,8)
movq %r8,%rbx
adcxq %rsi,%r15
incq %rcx
jnz .Lsqrx8x_tail
cmpq 0+8(%rsp),%rbp
jae .Lsqrx8x_tail_done
subq 16+8(%rsp),%rsi
movq 48+8(%rsp),%rdx
leaq 64(%rbp),%rbp
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
subq $8,%rcx
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp .Lsqrx8x_tail
.align 32
.Lsqrx8x_tail_done:
xorq %rax,%rax
addq 24+8(%rsp),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
subq 16+8(%rsp),%rsi
.Lsqrx8x_no_tail:
adcq 0(%rdi),%r8
.byte 102,72,15,126,217
adcq 8(%rdi),%r9
movq 56(%rbp),%rsi
.byte 102,72,15,126,213
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq 32+8(%rsp),%rbx
movq 64(%rdi,%rcx,1),%rdx
movq %r8,0(%rdi)
leaq 64(%rdi),%r8
movq %r9,8(%rdi)
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi,%rcx,1),%rdi
cmpq 8+8(%rsp),%r8
jb .Lsqrx8x_reduction_loop
ret
.cfi_endproc
.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
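// MULX/ADX final reduction; ANDN takes the place of the NOT/AND pair used
// in __bn_post4x_internal.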
.align 32
.type __bn_postx4x_internal,@function
__bn_postx4x_internal:
.cfi_startproc
movq 0(%rbp),%r12
movq %rcx,%r10
movq %rcx,%r9
negq %rax
sarq $3+2,%rcx
.byte 102,72,15,126,202
.byte 102,72,15,126,206
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp .Lsqrx4x_sub_entry
.align 16
.Lsqrx4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
.Lsqrx4x_sub_entry:
andnq %rax,%r12,%r12
leaq 32(%rbp),%rbp
andnq %rax,%r13,%r13
andnq %rax,%r14,%r14
andnq %rax,%r15,%r15
negq %r8
adcq 0(%rdi),%r12
adcq 8(%rdi),%r13
adcq 16(%rdi),%r14
adcq 24(%rdi),%r15
movq %r12,0(%rdx)
leaq 32(%rdi),%rdi
movq %r13,8(%rdx)
sbbq %r8,%r8
movq %r14,16(%rdx)
movq %r15,24(%rdx)
leaq 32(%rdx),%rdx
incq %rcx
jnz .Lsqrx4x_sub
negq %r9
ret
.cfi_endproc
.size __bn_postx4x_internal,.-__bn_postx4x_internal
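// Scatter one value into the window table with a 256-byte stride, so the 32
// window entries interleave across cache lines and a later gather touches
// the same lines whatever the index.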
.globl bn_scatter5
.hidden bn_scatter5
.type bn_scatter5,@function
.align 16
bn_scatter5:
.cfi_startproc
_CET_ENDBR
cmpl $0,%esi
jz .Lscatter_epilogue
leaq (%rdx,%rcx,8),%rdx
.Lscatter:
movq (%rdi),%rax
leaq 8(%rdi),%rdi
movq %rax,(%rdx)
leaq 256(%rdx),%rdx
subl $1,%esi
jnz .Lscatter
.Lscatter_epilogue:
ret
.cfi_endproc
.size bn_scatter5,.-bn_scatter5
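// Constant-time 1-of-32 gather: PCMPEQD masks for the requested index are
// staged on the stack, then every table line is read, masked, and OR-ed per
// output word; no secret-dependent loads or branches.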
.globl bn_gather5
.hidden bn_gather5
.type bn_gather5,@function
.align 32
bn_gather5:
.cfi_startproc
.LSEH_begin_bn_gather5:
_CET_ENDBR
.byte 0x4c,0x8d,0x14,0x24
.cfi_def_cfa_register %r10
.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
leaq .Linc(%rip),%rax
andq $-16,%rsp
movd %ecx,%xmm5
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 128(%rdx),%r11
leaq 128(%rsp),%rax
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-128(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-112(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-96(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-80(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-48(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-16(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,0(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,16(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,48(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,80(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,96(%rax)
movdqa %xmm4,%xmm2
movdqa %xmm3,112(%rax)
jmp .Lgather
.align 32
.Lgather:
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r11),%xmm0
movdqa -112(%r11),%xmm1
movdqa -96(%r11),%xmm2
pand -128(%rax),%xmm0
movdqa -80(%r11),%xmm3
pand -112(%rax),%xmm1
por %xmm0,%xmm4
pand -96(%rax),%xmm2
por %xmm1,%xmm5
pand -80(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r11),%xmm0
movdqa -48(%r11),%xmm1
movdqa -32(%r11),%xmm2
pand -64(%rax),%xmm0
movdqa -16(%r11),%xmm3
pand -48(%rax),%xmm1
por %xmm0,%xmm4
pand -32(%rax),%xmm2
por %xmm1,%xmm5
pand -16(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r11),%xmm0
movdqa 16(%r11),%xmm1
movdqa 32(%r11),%xmm2
pand 0(%rax),%xmm0
movdqa 48(%r11),%xmm3
pand 16(%rax),%xmm1
por %xmm0,%xmm4
pand 32(%rax),%xmm2
por %xmm1,%xmm5
pand 48(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r11),%xmm0
movdqa 80(%r11),%xmm1
movdqa 96(%r11),%xmm2
pand 64(%rax),%xmm0
movdqa 112(%r11),%xmm3
pand 80(%rax),%xmm1
por %xmm0,%xmm4
pand 96(%rax),%xmm2
por %xmm1,%xmm5
pand 112(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
leaq 256(%r11),%r11
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
movq %xmm0,(%rdi)
leaq 8(%rdi),%rdi
subl $1,%esi
jnz .Lgather
leaq (%r10),%rsp
.cfi_def_cfa_register %rsp
ret
.LSEH_end_bn_gather5:
.cfi_endproc
.size bn_gather5,.-bn_gather5
.section .rodata
.align 64
.Linc:
.long 0,0, 1,1
.long 2,2, 2,2
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 21,653
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-x86_64-macosx.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
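// Precompute the GHASH key schedule: the hash key H is bit-reflected into
// the working representation, then its powers (through H^4) and the
// xor-combined halves used by the Karatsuba step are stored; the
// .byte 102,15,58,68,... sequences encode PCLMULQDQ.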
.globl _gcm_init_clmul
.private_extern _gcm_init_clmul
.p2align 4
_gcm_init_clmul:
_CET_ENDBR
L$_init_clmul:
movdqu (%rsi),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand L$0x1c2_polynomial(%rip),%xmm5
pxor %xmm5,%xmm2
pshufd $78,%xmm2,%xmm6
movdqa %xmm2,%xmm0
pxor %xmm2,%xmm6
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,0(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%rdi)
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm5
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm5,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm5,%xmm3
movdqu %xmm5,48(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,64(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,80(%rdi)
ret
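// GHASH over a buffer: input blocks are byte-reflected with L$bswap_mask,
// processed four at a time in L$mod4_loop with one deferred reduction per
// group; 1- and 2-block tails take the L$odd_tail/L$even_tail paths.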
.globl _gcm_ghash_clmul
.private_extern _gcm_ghash_clmul
.p2align 5
_gcm_ghash_clmul:
_CET_ENDBR
L$_ghash_clmul:
movdqa L$bswap_mask(%rip),%xmm10
movdqu (%rdi),%xmm0
movdqu (%rsi),%xmm2
movdqu 32(%rsi),%xmm7
.byte 102,65,15,56,0,194
subq $0x10,%rcx
jz L$odd_tail
movdqu 16(%rsi),%xmm6
cmpq $0x30,%rcx
jb L$skip4x
subq $0x30,%rcx
movq $0xA040608020C0E000,%rax
movdqu 48(%rsi),%xmm14
movdqu 64(%rsi),%xmm15
movdqu 48(%rdx),%xmm3
movdqu 32(%rdx),%xmm11
.byte 102,65,15,56,0,218
.byte 102,69,15,56,0,218
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm11,%xmm12
.byte 102,68,15,58,68,222,0
.byte 102,68,15,58,68,238,17
.byte 102,68,15,58,68,231,16
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
xorps %xmm12,%xmm4
movdqu 16(%rdx),%xmm11
movdqu 0(%rdx),%xmm8
.byte 102,69,15,56,0,218
.byte 102,69,15,56,0,194
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm8,%xmm0
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,69,15,58,68,238,17
.byte 102,68,15,58,68,231,0
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jc L$tail4x
jmp L$mod4_loop
.p2align 5
L$mod4_loop:
.byte 102,65,15,58,68,199,0
xorps %xmm12,%xmm4
movdqu 48(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,65,15,58,68,207,17
xorps %xmm3,%xmm0
movdqu 32(%rdx),%xmm3
movdqa %xmm11,%xmm13
.byte 102,68,15,58,68,199,16
pshufd $78,%xmm11,%xmm12
xorps %xmm5,%xmm1
pxor %xmm11,%xmm12
.byte 102,65,15,56,0,218
movups 32(%rsi),%xmm7
xorps %xmm4,%xmm8
.byte 102,68,15,58,68,218,0
pshufd $78,%xmm3,%xmm4
pxor %xmm0,%xmm8
movdqa %xmm3,%xmm5
pxor %xmm1,%xmm8
pxor %xmm3,%xmm4
movdqa %xmm8,%xmm9
.byte 102,68,15,58,68,234,17
pslldq $8,%xmm8
psrldq $8,%xmm9
pxor %xmm8,%xmm0
movdqa L$7_mask(%rip),%xmm8
pxor %xmm9,%xmm1
.byte 102,76,15,110,200
pand %xmm0,%xmm8
.byte 102,69,15,56,0,200
pxor %xmm0,%xmm9
.byte 102,68,15,58,68,231,0
psllq $57,%xmm9
movdqa %xmm9,%xmm8
pslldq $8,%xmm9
.byte 102,15,58,68,222,0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
movdqu 0(%rdx),%xmm8
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,238,17
xorps %xmm11,%xmm3
movdqu 16(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,15,58,68,231,16
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
.byte 102,69,15,56,0,194
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
movdqa %xmm11,%xmm13
pxor %xmm12,%xmm4
pshufd $78,%xmm11,%xmm12
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm1
.byte 102,69,15,58,68,238,17
xorps %xmm11,%xmm3
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,68,15,58,68,231,0
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jnc L$mod4_loop
L$tail4x:
.byte 102,65,15,58,68,199,0
.byte 102,65,15,58,68,207,17
.byte 102,68,15,58,68,199,16
xorps %xmm12,%xmm4
xorps %xmm3,%xmm0
xorps %xmm5,%xmm1
pxor %xmm0,%xmm1
pxor %xmm4,%xmm8
pxor %xmm1,%xmm8
pxor %xmm0,%xmm1
movdqa %xmm8,%xmm9
psrldq $8,%xmm8
pslldq $8,%xmm9
pxor %xmm8,%xmm1
pxor %xmm9,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
addq $0x40,%rcx
jz L$done
movdqu 32(%rsi),%xmm7
subq $0x10,%rcx
jz L$odd_tail
L$skip4x:
movdqu (%rdx),%xmm8
movdqu 16(%rdx),%xmm3
.byte 102,69,15,56,0,194
.byte 102,65,15,56,0,218
pxor %xmm8,%xmm0
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
leaq 32(%rdx),%rdx
nop
subq $0x20,%rcx
jbe L$even_tail
nop
jmp L$mod_loop
.p2align 5
L$mod_loop:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
movdqu (%rdx),%xmm9
pxor %xmm0,%xmm8
.byte 102,69,15,56,0,202
movdqu 16(%rdx),%xmm3
pxor %xmm1,%xmm8
pxor %xmm9,%xmm1
pxor %xmm8,%xmm4
.byte 102,65,15,56,0,218
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm3,%xmm5
movdqa %xmm0,%xmm9
movdqa %xmm0,%xmm8
psllq $5,%xmm0
pxor %xmm0,%xmm8
.byte 102,15,58,68,218,0
psllq $1,%xmm0
pxor %xmm8,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm8
pslldq $8,%xmm0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pshufd $78,%xmm5,%xmm4
pxor %xmm8,%xmm1
pxor %xmm5,%xmm4
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,234,17
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
pxor %xmm9,%xmm0
leaq 32(%rdx),%rdx
psrlq $1,%xmm0
.byte 102,15,58,68,231,0
pxor %xmm1,%xmm0
subq $0x20,%rcx
ja L$mod_loop
L$even_tail:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
pxor %xmm0,%xmm8
pxor %xmm1,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testq %rcx,%rcx
jnz L$done
L$odd_tail:
movdqu (%rdx),%xmm8
.byte 102,69,15,56,0,194
pxor %xmm8,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,223,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
L$done:
.byte 102,65,15,56,0,194
movdqu %xmm0,(%rdi)
ret
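// AVX key-schedule variant: four passes of L$init_loop_avx extend the table
// to eight powers of H (plus xored halves) so gcm_ghash_avx can aggregate
// eight blocks per reduction.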
.globl _gcm_init_avx
.private_extern _gcm_init_avx
.p2align 5
_gcm_init_avx:
_CET_ENDBR
vzeroupper
vmovdqu (%rsi),%xmm2
vpshufd $78,%xmm2,%xmm2
vpshufd $255,%xmm2,%xmm4
vpsrlq $63,%xmm2,%xmm3
vpsllq $1,%xmm2,%xmm2
vpxor %xmm5,%xmm5,%xmm5
vpcmpgtd %xmm4,%xmm5,%xmm5
vpslldq $8,%xmm3,%xmm3
vpor %xmm3,%xmm2,%xmm2
vpand L$0x1c2_polynomial(%rip),%xmm5,%xmm5
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm2,%xmm2,%xmm6
vmovdqa %xmm2,%xmm0
vpxor %xmm2,%xmm6,%xmm6
movq $4,%r10
jmp L$init_start_avx
.p2align 5
L$init_loop_avx:
vpalignr $8,%xmm3,%xmm4,%xmm5
vmovdqu %xmm5,-16(%rdi)
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
L$init_start_avx:
vmovdqa %xmm0,%xmm5
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
vpshufd $78,%xmm5,%xmm3
vpshufd $78,%xmm0,%xmm4
vpxor %xmm5,%xmm3,%xmm3
vmovdqu %xmm5,0(%rdi)
vpxor %xmm0,%xmm4,%xmm4
vmovdqu %xmm0,16(%rdi)
leaq 48(%rdi),%rdi
subq $1,%r10
jnz L$init_loop_avx
vpalignr $8,%xmm4,%xmm3,%xmm5
vmovdqu %xmm5,-16(%rdi)
vzeroupper
ret
.globl _gcm_ghash_avx
.private_extern _gcm_ghash_avx
.p2align 5
_gcm_ghash_avx:
_CET_ENDBR
vzeroupper
vmovdqu (%rdi),%xmm10
leaq L$0x1c2_polynomial(%rip),%r10
leaq 64(%rsi),%rsi
vmovdqu L$bswap_mask(%rip),%xmm13
vpshufb %xmm13,%xmm10,%xmm10
cmpq $0x80,%rcx
jb L$short_avx
subq $0x80,%rcx
vmovdqu 112(%rdx),%xmm14
vmovdqu 0-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vmovdqu 32-64(%rsi),%xmm7
vpunpckhqdq %xmm14,%xmm14,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm14,%xmm9,%xmm9
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 80(%rdx),%xmm14
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 48-64(%rsi),%xmm6
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 64(%rdx),%xmm15
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 48(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 32(%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 16(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu (%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
leaq 128(%rdx),%rdx
cmpq $0x80,%rcx
jb L$tail_avx
vpxor %xmm10,%xmm15,%xmm15
subq $0x80,%rcx
jmp L$oop8x_avx
.p2align 5
L$oop8x_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 112(%rdx),%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpxor %xmm15,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11
vmovdqu 0-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12
vmovdqu 32-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm3,%xmm10,%xmm10
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vxorps %xmm4,%xmm11,%xmm11
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm5,%xmm12,%xmm12
vxorps %xmm15,%xmm8,%xmm8
vmovdqu 80(%rdx),%xmm14
vpxor %xmm10,%xmm12,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm11,%xmm12,%xmm12
vpslldq $8,%xmm12,%xmm9
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vpsrldq $8,%xmm12,%xmm12
vpxor %xmm9,%xmm10,%xmm10
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vxorps %xmm12,%xmm11,%xmm11
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 64(%rdx),%xmm15
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vxorps %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vmovdqu 48(%rdx),%xmm14
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 32(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vxorps %xmm12,%xmm10,%xmm10
vmovdqu 16(%rdx),%xmm14
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vxorps %xmm11,%xmm12,%xmm12
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu (%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm12,%xmm15,%xmm15
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
vpxor %xmm10,%xmm15,%xmm15
leaq 128(%rdx),%rdx
subq $0x80,%rcx
jnc L$oop8x_avx
addq $0x80,%rcx
jmp L$tail_no_xor_avx
.p2align 5
L$short_avx:
vmovdqu -16(%rdx,%rcx,1),%xmm14
leaq (%rdx,%rcx,1),%rdx
vmovdqu 0-64(%rsi),%xmm6
vmovdqu 32-64(%rsi),%xmm7
vpshufb %xmm13,%xmm14,%xmm15
vmovdqa %xmm0,%xmm3
vmovdqa %xmm1,%xmm4
vmovdqa %xmm2,%xmm5
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -32(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -48(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 80-64(%rsi),%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -64(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -80(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 96-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 128-64(%rsi),%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -96(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz L$tail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -112(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 144-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovq 184-64(%rsi),%xmm7
subq $0x10,%rcx
jmp L$tail_avx
.p2align 5
L$tail_avx:
vpxor %xmm10,%xmm15,%xmm15
L$tail_no_xor_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu (%r10),%xmm12
vpxor %xmm0,%xmm3,%xmm10
vpxor %xmm1,%xmm4,%xmm11
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm10,%xmm5,%xmm5
vpxor %xmm11,%xmm5,%xmm5
vpslldq $8,%xmm5,%xmm9
vpsrldq $8,%xmm5,%xmm5
vpxor %xmm9,%xmm10,%xmm10
vpxor %xmm5,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm11,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
cmpq $0,%rcx
jne L$short_avx
vpshufb %xmm13,%xmm10,%xmm10
vmovdqu %xmm10,(%rdi)
vzeroupper
ret
.section __DATA,__const
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
L$0x1c2_polynomial:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
L$7_mask:
.long 7,0,7,0
.p2align 6
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
.text
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 6,269
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-armv4-linux32.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL
@ instructions are in aesv8-armx.pl.)
.arch armv7-a
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#define ldrplb ldrbpl
#define ldrneb ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl gcm_init_neon
.hidden gcm_init_neon
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
vld1.64 d7,[r1]! @ load H
vmov.i8 q8,#0xe1
vld1.64 d6,[r1]
vshl.i64 d17,#57
vshr.u64 d16,#63 @ t0=0xc2....01
vdup.8 q9,d7[7]
vshr.u64 d26,d6,#63
vshr.s8 q9,#7 @ broadcast carry bit
vshl.i64 q3,q3,#1
vand q8,q8,q9
vorr d7,d26 @ H<<<=1
veor q3,q3,q8 @ twisted H
vstmia r0,{q3}
bx lr @ bx lr
.size gcm_init_neon,.-gcm_init_neon
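@ A hedged C sketch of the "twisting" above (illustrative only; it assumes
@ H arrives big-endian, first eight bytes in hi, last eight in lo):
@
@   #include <stdint.h>
@   typedef struct { uint64_t hi, lo; } u128;
@   static u128 twist_h(uint64_t hi, uint64_t lo) {
@       uint64_t carry = hi >> 63;            /* bit shifted off the top */
@       u128 t = { (hi << 1) | (lo >> 63), lo << 1 };   /* H <<= 1 */
@       if (carry) {                          /* fold 0xC2...01 back in */
@           t.hi ^= 0xC200000000000000ULL;
@           t.lo ^= 1;
@       }
@       return t;
@   }
@
@ The NEON code above reaches the same result branch-free via a vand mask.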
.globl gcm_gmult_neon
.hidden gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
vld1.64 d7,[r0]! @ load Xi
vld1.64 d6,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q3,q3
#endif
vmov.i64 d31,#0x000000000000ffff
veor d28,d26,d27 @ Karatsuba pre-processing
mov r3,#16
b .Lgmult_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.globl gcm_ghash_neon
.hidden gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
vld1.64 d1,[r0]! @ load Xi
vld1.64 d0,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q0,q0
#endif
vmov.i64 d31,#0x000000000000ffff
veor d28,d26,d27 @ Karatsuba pre-processing
.Loop_neon:
vld1.64 d7,[r2]! @ load inp
vld1.64 d6,[r2]!
#ifdef __ARMEL__
vrev64.8 q3,q3
#endif
veor q3,q0 @ inp^=Xi
.Lgmult_neon:
vext.8 d16, d26, d26, #1 @ A1
vmull.p8 q8, d16, d6 @ F = A1*B
vext.8 d0, d6, d6, #1 @ B1
vmull.p8 q0, d26, d0 @ E = A*B1
vext.8 d18, d26, d26, #2 @ A2
vmull.p8 q9, d18, d6 @ H = A2*B
vext.8 d22, d6, d6, #2 @ B2
vmull.p8 q11, d26, d22 @ G = A*B2
vext.8 d20, d26, d26, #3 @ A3
veor q8, q8, q0 @ L = E + F
vmull.p8 q10, d20, d6 @ J = A3*B
vext.8 d0, d6, d6, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q0, d26, d0 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d6, d6, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d26, d22 @ K = A*B4
veor q10, q10, q0 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q0, d26, d6 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q0, q0, q8
veor q0, q0, q10
veor d6,d6,d7 @ Karatsuba pre-processing
vext.8 d16, d28, d28, #1 @ A1
vmull.p8 q8, d16, d6 @ F = A1*B
vext.8 d2, d6, d6, #1 @ B1
vmull.p8 q1, d28, d2 @ E = A*B1
vext.8 d18, d28, d28, #2 @ A2
vmull.p8 q9, d18, d6 @ H = A2*B
vext.8 d22, d6, d6, #2 @ B2
vmull.p8 q11, d28, d22 @ G = A*B2
vext.8 d20, d28, d28, #3 @ A3
veor q8, q8, q1 @ L = E + F
vmull.p8 q10, d20, d6 @ J = A3*B
vext.8 d2, d6, d6, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q1, d28, d2 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d6, d6, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d28, d22 @ K = A*B4
veor q10, q10, q1 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q1, d28, d6 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q1, q1, q8
veor q1, q1, q10
vext.8 d16, d27, d27, #1 @ A1
vmull.p8 q8, d16, d7 @ F = A1*B
vext.8 d4, d7, d7, #1 @ B1
vmull.p8 q2, d27, d4 @ E = A*B1
vext.8 d18, d27, d27, #2 @ A2
vmull.p8 q9, d18, d7 @ H = A2*B
vext.8 d22, d7, d7, #2 @ B2
vmull.p8 q11, d27, d22 @ G = A*B2
vext.8 d20, d27, d27, #3 @ A3
veor q8, q8, q2 @ L = E + F
vmull.p8 q10, d20, d7 @ J = A3*B
vext.8 d4, d7, d7, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q2, d27, d4 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d7, d7, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d27, d22 @ K = A*B4
veor q10, q10, q2 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q2, d27, d7 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q2, q2, q8
veor q2, q2, q10
veor q1,q1,q0 @ Karatsuba post-processing
veor q1,q1,q2
veor d1,d1,d2
veor d4,d4,d3 @ Xh|Xl - 256-bit result
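@ The three vmull.p8 pipelines above produced D = A_lo*B_lo (q0),
@ E = (A_lo^A_hi)*(B_lo^B_hi) (q1) and F = A_hi*B_hi (q2). Addition in
@ GF(2) is XOR, so the Karatsuba middle term involves no borrows; a
@ hedged sketch of the recombination just performed:
@   mid = E ^ D ^ F
@   Xl  = D ^ (mid << 64)   @ low 128 bits
@   Xh  = F ^ (mid >> 64)   @ high 128 bits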
@ equivalent of reduction_avx from ghash-x86_64.pl
vshl.i64 q9,q0,#57 @ 1st phase
vshl.i64 q10,q0,#62
veor q10,q10,q9 @
vshl.i64 q9,q0,#63
veor q10, q10, q9 @
veor d1,d1,d20 @
veor d4,d4,d21
vshr.u64 q10,q0,#1 @ 2nd phase
veor q2,q2,q0
veor q0,q0,q10 @
vshr.u64 q10,q10,#6
vshr.u64 q0,q0,#1 @
veor q0,q0,q2 @
veor q0,q0,q10 @
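@ The shifts above reduce modulo g(x) = x^128 + x^7 + x^2 + x + 1 in
@ GHASH's bit-reflected representation: the left shifts by 57, 62 and 63
@ (= 64-7, 64-2, 64-1) fold the low half through g's low-degree terms,
@ and the right-shift phase completes the fold, collapsing the 256-bit
@ carry-less product to 128 bits with XORs only.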
subs r3,#16
bne .Loop_neon
#ifdef __ARMEL__
vrev64.8 q0,q0
#endif
sub r0,#16
vst1.64 d1,[r0]! @ write out Xi
vst1.64 d0,[r0]
bx lr @ bx lr
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
mktmansour/MKT-KSA-Geolocation-Security
| 35,401
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/p256-armv8-asm-ios64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.section __TEXT,__const
.align 5
Lpoly:
.quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
LRR: // 2^512 mod p, precomputed for the NIST P-256 prime (Montgomery RR)
.quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
Lone_mont:
.quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
Lone:
.quad 1,0,0,0
Lord:
.quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
LordK:
.quad 0xccd1c8aaee00bc4f
.byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
// void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
// const BN_ULONG x2[4]);
.globl _ecp_nistz256_mul_mont
.private_extern _ecp_nistz256_mul_mont
.align 4
_ecp_nistz256_mul_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly@PAGE
add x13,x13,Lpoly@PAGEOFF
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_mul_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl _ecp_nistz256_sqr_mont
.private_extern _ecp_nistz256_sqr_mont
.align 4
_ecp_nistz256_sqr_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly@PAGE
add x13,x13,Lpoly@PAGEOFF
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sqr_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl _ecp_nistz256_neg
.private_extern _ecp_nistz256_neg
.align 4
_ecp_nistz256_neg:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x2,x1
mov x14,xzr // a = 0
mov x15,xzr
mov x16,xzr
mov x17,xzr
adrp x13,Lpoly@PAGE
add x13,x13,Lpoly@PAGEOFF
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sub_from
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// Note that __ecp_nistz256_mul_mont expects the a[0-3] input pre-loaded
// in x4-x7, and b[0] in x3.
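// The four reduction steps below exploit two facts about the P-256 prime
// p = 2^256 - 2^224 + 2^192 + 2^96 - 1: its lowest limb is all-ones, so
// the Montgomery constant -p^(-1) mod 2^64 is exactly 1 and the per-step
// multiplier is simply acc[0]; and its other limbs (0xffffffff, 0,
// 0xffffffff00000001) turn acc[0]*p into the lsl/lsr-#32 pairs seen
// here. Hedged summary of one step:
//   m   = acc[0]                  // since -p^(-1) mod 2^64 == 1
//   acc = (acc + m * p) >> 64     // the low limb cancels exactly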
.align 4
__ecp_nistz256_mul_mont:
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x11,x7,x3
ldr x3,[x2,#8] // b[1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adc x19,xzr,x11
mov x20,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(1+1)] // b[1+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(2+1)] // b[2+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
// last reduction
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adcs x17,x19,x11
adc x19,x20,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// Note that __ecp_nistz256_sqr_mont expects the a[0-3] input pre-loaded
// in x4-x7.
.align 4
__ecp_nistz256_sqr_mont:
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is the x-th 64-bit limb of the
// 512-bit result, i.e. follow the accumulator words below.
//
// The "can't overflow" notes below mark carries into the high part of a
// multiplication result, which cannot overflow: the high part of a
// 64x64-bit product is never all ones.
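// In formula form, with a_i the 64-bit limbs of a:
//   a^2 = sum_i a_i^2 * 2^(128*i) + 2 * sum_{i<j} a_i*a_j * 2^(64*(i+j))
// which is why acc[1-6] are doubled before the squared diagonal terms
// are added in.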
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x2,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
lsl x8,x14,#32
adcs x1,x1,x11
lsr x9,x14,#32
adc x2,x2,x7
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adc x17,x11,xzr // can't overflow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x2
adc x19,xzr,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to
// x4-x7 and x8-x11. This is done because it's used in multiple
// contexts, e.g. in multiplication by 2 and 3...
.align 4
__ecp_nistz256_add_to:
adds x14,x14,x8 // ret = a+b
adcs x15,x15,x9
adcs x16,x16,x10
adcs x17,x17,x11
adc x1,xzr,xzr // x1 := carry-out (the pointer in x1 is no longer needed)
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x1,xzr // did subtraction borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.align 4
__ecp_nistz256_sub_from:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x14,x8 // ret = a-b
sbcs x15,x15,x9
sbcs x16,x16,x10
sbcs x17,x17,x11
sbc x1,xzr,xzr // x1 := 0 or -1 borrow mask (x1 is no longer needed)
subs x8,x14,#1 // adds x8,x14,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.align 4
__ecp_nistz256_sub_morf:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x8,x14 // ret = b-a
sbcs x15,x9,x15
sbcs x16,x10,x16
sbcs x17,x11,x17
sbc x1,xzr,xzr // x1 := 0 or -1 borrow mask (x1 is no longer needed)
subs x8,x14,#1 // adds x8,x14,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
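// Halving mod p below relies on p being odd: if a is odd then a+p is
// even, so
//   a/2 mod p = (a even ? a : a + p) >> 1
// with the right shift pulling the addition's carry bit back in at the
// top (hedged summary of __ecp_nistz256_div_by_2).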
.align 4
__ecp_nistz256_div_by_2:
subs x8,x14,#1 // adds x8,x14,#-1 // tmp = a+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adcs x11,x17,x13
adc x1,xzr,xzr // x1 := carry-out (becomes the result's top bit below)
tst x14,#1 // is a even?
csel x14,x14,x8,eq // ret = even ? a : a+modulus
csel x15,x15,x9,eq
csel x16,x16,x10,eq
csel x17,x17,x11,eq
csel x1,xzr,x1,eq
lsr x14,x14,#1 // ret >>= 1
orr x14,x14,x15,lsl#63
lsr x15,x15,#1
orr x15,x15,x16,lsl#63
lsr x16,x16,#1
orr x16,x16,x17,lsl#63
lsr x17,x17,#1
stp x14,x15,[x0]
orr x17,x17,x1,lsl#63
stp x16,x17,[x0,#16]
ret
.globl _ecp_nistz256_point_double
.private_extern _ecp_nistz256_point_double
.align 5
_ecp_nistz256_point_double:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
sub sp,sp,#32*4
Ldouble_shortcut:
ldp x14,x15,[x1,#32]
mov x21,x0
ldp x16,x17,[x1,#48]
mov x22,x1
adrp x13,Lpoly@PAGE
add x13,x13,Lpoly@PAGEOFF
ldr x12,[x13,#8]
mov x8,x14
ldr x13,[x13,#24]
mov x9,x15
ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[x22,#64+16]
add x0,sp,#0
bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y);
add x0,sp,#64
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
ldp x8,x9,[x22]
ldp x10,x11,[x22,#16]
mov x4,x14 // put Zsqr aside for p256_sub
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x);
add x2,x22,#0
mov x14,x4 // restore Zsqr
mov x15,x5
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x16,x6
mov x17,x7
ldp x6,x7,[sp,#0+16]
add x0,sp,#64
bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
add x0,sp,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
ldr x3,[x22,#32]
ldp x4,x5,[x22,#64]
ldp x6,x7,[x22,#64+16]
add x2,x22,#32
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#0+16]
add x0,x21,#64
bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0);
add x0,sp,#96
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
ldr x3,[sp,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x0,x21,#32
bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
add x2,sp,#64
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
mov x8,x14 // duplicate M
mov x9,x15
mov x10,x16
mov x11,x17
mov x4,x14 // put M aside
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to
mov x8,x4 // restore M
mov x9,x5
ldr x3,[x22] // forward load for p256_mul_mont
mov x10,x6
ldp x4,x5,[sp,#0]
mov x11,x7
ldp x6,x7,[sp,#0+16]
bl __ecp_nistz256_add_to // p256_mul_by_3(M, M);
add x2,x22,#0
add x0,sp,#0
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#32+16]
add x0,sp,#96
bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S);
add x0,x21,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
add x2,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
add x2,sp,#0
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
ldr x3,[sp,#32]
mov x4,x14 // copy S
mov x5,x15
mov x6,x16
mov x7,x17
add x2,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
add x2,x21,#32
add x0,x21,#32
bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _ecp_nistz256_point_add
.private_extern _ecp_nistz256_point_add
.align 5
_ecp_nistz256_point_add:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#32*12
ldp x4,x5,[x2,#64] // in2_z
ldp x6,x7,[x2,#64+16]
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly@PAGE
add x13,x13,Lpoly@PAGEOFF
ldr x12,[x13,#8]
ldr x13,[x13,#24]
orr x8,x4,x5
orr x10,x6,x7
orr x25,x8,x10
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
ldp x4,x5,[x22,#64] // in1_z
ldp x6,x7,[x22,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x2,x23,#64
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x22,#64
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#32]
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x2,x22,#32
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#352]
ldp x6,x7,[sp,#352+16]
add x2,x23,#32
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,sp,#320
ldr x3,[sp,#192] // forward load for p256_mul_mont
ldp x4,x5,[x22]
ldp x6,x7,[x22,#16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x26,x14,x16 // ~is_equal(S1,S2)
add x2,sp,#192
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
ldr x3,[sp,#128]
ldp x4,x5,[x23]
ldp x6,x7,[x23,#16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
add x2,sp,#256
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x14,x14,x16 // ~is_equal(U1,U2)
mvn x27,x24 // -1/0 -> 0/-1
mvn x28,x25 // -1/0 -> 0/-1
orr x14,x14,x27
orr x14,x14,x28
orr x14,x14,x26
cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))
Ladd_double:
mov x1,x22
mov x0,x21
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
add sp,sp,#256 // #256 = 32*(12-4), the difference between the two stack frames
b Ldouble_shortcut
.align 4
Ladd_proceed:
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#64]
ldp x6,x7,[sp,#64+16]
add x2,x23,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
ldr x3,[sp,#96]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,sp,#96
add x0,sp,#224
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[sp,#128]
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#128
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#192
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#224
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#288
ldr x3,[sp,#224] // forward load for p256_mul_mont
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,sp,#224
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#160
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#352
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
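// The csel chains above resolve the points at infinity limb by limb
// using the ~in1infty/~in2infty masks prepared earlier; in hedged
// pseudocode, for each coordinate:
//   res = in2infty ? in1 : (in1infty ? in2 : res)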
Ladd_done:
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _ecp_nistz256_point_add_affine
.private_extern _ecp_nistz256_point_add_affine
.align 5
_ecp_nistz256_point_add_affine:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
sub sp,sp,#32*10
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly@PAGE
add x13,x13,Lpoly@PAGEOFF
ldr x12,[x13,#8]
ldr x13,[x13,#24]
ldp x4,x5,[x1,#64] // in1_z
ldp x6,x7,[x1,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
ldp x14,x15,[x2] // in2_x
ldp x16,x17,[x2,#16]
ldp x8,x9,[x2,#32] // in2_y
ldp x10,x11,[x2,#48]
orr x14,x14,x15
orr x16,x16,x17
orr x8,x8,x9
orr x10,x10,x11
orr x14,x14,x16
orr x8,x8,x10
orr x25,x14,x8
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
mov x4,x14
mov x5,x15
mov x6,x16
mov x7,x17
ldr x3,[x23]
add x2,x23,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
add x2,x22,#0
ldr x3,[x22,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
add x2,x22,#64
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#160]
ldp x6,x7,[sp,#160+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x23,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,x22,#32
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#192
bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
add x0,sp,#224
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x0,sp,#288
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,sp,#160
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[x22]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,x22,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#224
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#288
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#256
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#96
ldr x3,[x22,#32] // forward load for p256_mul_mont
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,x22,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
ldr x3,[sp,#192]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#192
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#128
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
adrp x23,Lone_mont@PAGE-64
add x23,x23,Lone_mont@PAGEOFF-64
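// x23 now points 64 bytes below Lone_mont, so the "in2" z-limb loads at
// #32+32 and #32+48 below read one in Montgomery form: an affine
// input's implicit Z coordinate is 1.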
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x29,x30,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
// uint64_t b[4]);
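// Same word-by-word Montgomery scheme as __ecp_nistz256_mul_mont, but
// modulo the group order n. n's low limb is not all-ones, so each step
// first multiplies acc[0] by the precomputed LordK = -n^(-1) mod 2^64
// (hedged summary):
//   m   = acc[0] * LordK   (mod 2^64)
//   acc = (acc + m * n) >> 64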
.globl _ecp_nistz256_ord_mul_mont
.private_extern _ecp_nistz256_ord_mul_mont
.align 4
_ecp_nistz256_ord_mul_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord@PAGE
add x23,x23,Lord@PAGEOFF
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x19,x7,x3
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts of multiplication
adcs x16,x16,x9
adcs x17,x17,x10
adc x19,x19,xzr
mov x20,xzr
ldr x3,[x2,#8*1] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*2] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*3] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
lsl x8,x24,#32 // last reduction
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
// uint64_t rep);
.globl _ecp_nistz256_ord_sqr_mont
.private_extern _ecp_nistz256_ord_sqr_mont
.align 4
_ecp_nistz256_ord_sqr_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord@PAGE
add x23,x23,Lord@PAGEOFF
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
b Loop_ord_sqr
.align 4
Loop_ord_sqr:
sub x2,x2,#1
////////////////////////////////////////////////////////////////
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is the x-th 64-bit limb of the
// 512-bit result, i.e. follow the accumulator words below.
//
// The "can't overflow" notes below mark carries into the high part of a
// multiplication result, which cannot overflow: the high part of a
// 64x64-bit product is never all ones.
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x3,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
mul x24,x14,x23
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
adcs x1,x1,x11
adc x3,x3,x7
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
mul x24,x14,x23
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x3
adc x19,xzr,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x5,x15,x9,lo
csel x6,x16,x10,lo
csel x7,x17,x11,lo
cbnz x2,Loop_ord_sqr
stp x4,x5,[x0]
stp x6,x7,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index);
.globl _ecp_nistz256_select_w5
.private_extern _ecp_nistz256_select_w5
.align 4
_ecp_nistz256_select_w5:
AARCH64_VALID_CALL_TARGET
// x10 := x0
// w9 := 0; loop counter and incremented internal index
mov x10, x0
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
movi v20.16b, #0
movi v21.16b, #0
Lselect_w5_loop:
// Loop 16 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// continue loading ...
ld1 {v26.2d, v27.2d}, [x1],#32
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
bit v20.16b, v26.16b, v3.16b
bit v21.16b, v27.16b, v3.16b
// Loop back while bit #4 of the counter is 0, i.e. while idx_ctr < 16
tbz w9, #4, Lselect_w5_loop
// Write [v16-v21] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64
st1 {v20.2d, v21.2d}, [x10]
ret
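// A hedged C sketch of the selection above (illustrative names; a w5
// table entry is 6*16 = 96 bytes, i.e. 12 uint64_t, and idx is 1-based):
//
//   #include <stdint.h>
//   void select_w5_ref(uint64_t val[12], const uint64_t *tbl, int idx) {
//       for (int i = 0; i < 12; i++) val[i] = 0;
//       for (int k = 1; k <= 16; k++) {
//           uint64_t mask = (uint64_t)0 - (uint64_t)(k == idx);
//           for (int i = 0; i < 12; i++)
//               val[i] |= tbl[(k - 1) * 12 + i] & mask;
//       }
//   }
//
// A C compiler may still branch on k == idx; the csetm+bit sequence
// keeps the NEON version branch-free by construction.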
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index);
.globl _ecp_nistz256_select_w7
.private_extern _ecp_nistz256_select_w7
.align 4
_ecp_nistz256_select_w7:
AARCH64_VALID_CALL_TARGET
// w9 := 0; loop counter and incremented internal index
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
Lselect_w7_loop:
// Loop 64 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
// Loop back while bit #6 of the counter is 0, i.e. while idx_ctr < 64
tbz w9, #6, Lselect_w7_loop
// Write [v16-v19] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0]
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
mktmansour/MKT-KSA-Geolocation-Security
| 73,972
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha20_poly1305_armv8-ios64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.section __TEXT,__const
.align 7
Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
Linc:
.long 1,2,3,4
Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.text
.align 6
Lpoly_hash_ad_internal:
.cfi_startproc
cbnz x4, Lpoly_hash_intro
ret
Lpoly_hash_intro:
cmp x4, #16
b.lt Lpoly_hash_ad_tail
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
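// The masks and extr above implement the Poly1305 fold: with
// p = 2^130 - 5 we have 2^130 = 5 (mod p), so splitting
// h = h_lo + 2^130*h_hi gives h = h_lo + 4*h_hi + h_hi (mod p); the
// `and #-4` keeps 4*h_hi in place and the extr/lsr copy supplies the
// remaining h_hi.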
sub x4, x4, #16
b Lpoly_hash_ad_internal
Lpoly_hash_ad_tail:
cbz x4, Lpoly_hash_ad_ret
eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
sub x4, x4, #1
Lpoly_hash_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, x4]
mov v20.b[0], w11
subs x4, x4, #1
b.ge Lpoly_hash_tail_16_compose
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lpoly_hash_ad_ret:
ret
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
.globl _chacha20_poly1305_seal
.private_extern _chacha20_poly1305_seal
.align 6
_chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts@PAGE
add x11, x11, Lchacha20_consts@PAGEOFF
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
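// Each pass of Lseal_init_rounds below is one vectorized ChaCha20
// double round. A hedged sketch of the scalar quarter round being
// applied (rotl 16 appears as rev32, rotl 8 as tbl with Lrol8, and
// rotl 12/7 as ushr+sli pairs):
//
//   a += b; d ^= a; d = rotl32(d, 16);
//   c += d; b ^= c; b = rotl32(b, 12);
//   a += b; d ^= a; d = rotl32(d, 8);
//   c += d; b ^= c; b = rotl32(b, 7);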
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
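// The zip1/zip2 sequence above is a 4x4 transpose of 32-bit words: it turns
// the lane-sliced state back into four contiguous blocks, so v0/v5/v10/v15
// now hold block 0, v1/v6/v11/v16 block 1, and so on.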
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
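// The first 32 bytes of the block-4 keystream become the one-time Poly1305
// key: r (v4) was clamped above with the CLAMP mask, i.e.
// r &= 0x0ffffffc0ffffffc0ffffffc0fffffff, and s (v9) is parked in v27.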
bl Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds
mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256
Lseal_main_loop:
adrp x11, Lchacha20_consts@PAGE
add x11, x11, Lchacha20_consts@PAGEOFF
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
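// Net effect: lane 0 of v20 is v25[3] + 1, so the fifth (row-form) block
// gets the counter one past the largest lane of v25, keeping the five
// counters of this batch consecutive.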
sub x5, x5, #32
.align 5
Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
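// The scalar block below is one Poly1305 update, interleaved with the
// vector rounds for throughput: acc += block + 2^128 (the pad bit comes in
// via x15), then acc = acc * r mod p, p = 2^130 - 5. Because
// 2^130 = 5 (mod p), the bits above bit 130 are folded back in as
// 5*c = 4*c + c, which is what the and/extr/lsr arithmetic after the
// multiply implements.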
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge Lseal_main_loop_rounds
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b Lseal_main_loop
Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
cmp x2, #64
b.lt Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b Lseal_tail
Lseal_tail_64:
ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt Lseal_tail_16
// Each iteration encrypts and authenticates a 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b Lseal_tail_64
Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
cbz x2, Lseal_hash_extra
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra_in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
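// The partial block is composed back-to-front: extra_in bytes are shifted
// in first and plaintext bytes last, so v20 ends up as pt_tail (x2 bytes)
// followed by extra_in. The 0xff mask built in v21 covers only the
// plaintext positions, so the keystream is applied to those bytes alone
// and the block hashed below is ct_tail || extra_in.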
Lseal_tail16_compose_extra_in:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt Lseal_tail16_compose_extra_in
add x3, x3, x12
Lseal_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_hash_extra:
cbz x4, Lseal_finalize
Lseal_hash_extra_loop:
cmp x4, #16
b.lt Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b Lseal_hash_extra_loop
Lseal_hash_extra_tail:
cbz x4, Lseal_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext
add x3, x3, x4
Lseal_hash_extra_load:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
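// The subs/sbcs triple computes acc - p for p = 2^130 - 5: the limbs of p
// are (2^64 - 5, 2^64 - 1, 3), so subtracting #-5 adds 5 to the low limb
// while x12 = -1 and x13 = 3 supply the upper limbs. The csels keep the
// subtracted value only when no borrow occurred (acc >= p), leaving acc
// fully reduced; the tag written below is then (acc + s) mod 2^128.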
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
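// Three blocks suffice here: the v2/v7/v12/v17 column keeps counter 0 and
// supplies the Poly1305 r and s (only its first 32 bytes are used), while
// the v0 and v1 columns carry counters 1 and 2 for up to 128 bytes of
// keystream.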
mov x6, #10
Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lseal_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
b Lseal_tail
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
.globl _chacha20_poly1305_open
.private_extern _chacha20_poly1305_open
.align 6
_chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts@PAGE
add x11, x11, Lchacha20_consts@PAGEOFF
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2
cmp x2, #128
b.le Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
mov x6, #10
.align 5
Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hashes 320 bytes and prepares keystream for 320 bytes
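// Note the MAC ordering: open authenticates the ciphertext before
// decrypting it (x3 tracks the input), whereas seal authenticates its own
// output; both therefore run Poly1305 over the ciphertext, as the AEAD
// construction requires.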
Lopen_main_loop:
cmp x2, #192
b.lt Lopen_tail
adrp x11, Lchacha20_consts@PAGE
add x11, x11, Lchacha20_consts@PAGEOFF
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12
sub x4, x4, #10
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
cbz x7, Lopen_main_loop_rounds_short
.align 5
Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt Lopen_main_loop_rounds
subs x6, x6, #1
b.ge Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_main_loop
Lopen_tail:
cbz x2, Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le Lopen_tail_64
cmp x2, #128
b.le Lopen_tail_128
Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
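// v23 picks up the next unused counter offset n from v25; v22 = n + 1 and
// v21 = n + 2, assigning counters n, n + 1 and n + 2 to the v1, v2 and v0
// columns respectively, so the two lower-counter blocks are output first
// and the v0 block is kept for Lopen_tail_64_store.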
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, Lopen_tail_192_rounds_no_hash
Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt Lopen_tail_192_rounds
subs x6, x6, #1
b.ge Lopen_tail_192_rounds_no_hash
// We hashed 160 bytes at most; there may still be up to 32 bytes left to hash
Lopen_tail_192_hash:
cbz x4, Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_tail_192_hash
Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b Lopen_tail_64_store
Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_128_rounds
cbz x4, Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_128_rounds
Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_tail_64_store
Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_64_rounds
cbz x4, Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_64_rounds
Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
Lopen_tail_64_store:
cmp x2, #16
b.lt Lopen_tail_16
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b Lopen_tail_64_store
Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
cbz x2, Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
add x7, x1, x2
mov x6, x2
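// The last x2 ciphertext bytes are shifted into v20 back-to-front while one
// 0xff byte per input byte is shifted into v21; the AND below zeroes
// everything past the data, so the block hashed here is the ciphertext
// tail zero-padded to 16 bytes.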
Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lopen_tail_16_store
Lopen_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
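// Same final reduction as in seal: conditionally subtract p = 2^130 - 5 and
// keep the result only when the subtraction did not borrow (acc >= p).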
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
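// As in Lseal_128, the v2 column keeps counter 0 and provides r and s,
// while counters 1 and 2 generate up to 128 bytes of keystream.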
mov x6, #10
Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
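// Lopen_128_store: for each full 64-byte ciphertext block, hash the
// ciphertext into Poly1305 first (open = decrypt, so the MAC covers
// ciphertext), then XOR with keystream and store. Lopen_128_hash_64 mops up
// the remaining full 16-byte chunks before the byte-level tail store.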
Lopen_128_store:
cmp x2, #64
b.lt Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
Lopen_128_hash_64:
cbz x4, Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_128_hash_64
.cfi_endproc
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// ----------------------------------------------------------------------------
// mktmansour/MKT-KSA-Geolocation-Security (40,202 bytes)
// .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha-armv8-win64.S
// ----------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.section .rodata
.align 5
Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
.globl ChaCha20_ctr32_nohw
.def ChaCha20_ctr32_nohw
.type 32
.endef
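// ChaCha20_ctr32_nohw(out=x0, in=x1, len=x2, key=x3, counter=x4): scalar
// implementation. The 16-word state stays packed two words per x-register
// (x22..x28, x30) between blocks and is unpacked into w5..w21 for the rounds.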
.align 5
ChaCha20_ctr32_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma
add x5,x5,:lo12:Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
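// Each Loop iteration below is one full double round (column quarter-rounds,
// then diagonal); x4 = 10 iterations gives the standard 20 ChaCha rounds.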
Loop:
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
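// Ltail: fewer than 64 bytes remain. The final keystream block is spilled to
// the 64-byte stack scratch area, XORed with the input one byte at a time,
// and the scratch area is wiped with zeros before the epilogue.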
.align 4
Ltail:
add x2,x2,#64
Less_than_64:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl ChaCha20_ctr32_neon
.def ChaCha20_ctr32_neon
.type 32
.endef
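// NEON variant: 256 bytes per outer iteration -- three blocks in vector
// registers (v0-v3, v4-v7, v16-v19) interleaved with a fourth block computed
// in general registers to overlap scalar and SIMD latencies. Inputs of 512
// bytes or more branch into the ChaCha20_512_neon body instead.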
.align 5
ChaCha20_ctr32_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma
add x5,x5,:lo12:Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
Loop_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b Last_neon
Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b Last_neon
Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b Last_neon
.align 4
Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.def ChaCha20_512_neon
.type 32
.endef
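// ChaCha20_512_neon: 512 bytes (eight blocks) per outer iteration. Six
// blocks live in v0..v23 while the general-register block runs at twice the
// round density: Loop_upper_neon completes scalar block 1 (all 20 rounds)
// while advancing the vector blocks halfway, and Loop_lower_neon finishes
// scalar block 2 together with the vector blocks' remaining rounds. d8-d15
// are spilled because v8-v15 are callee-saved under AAPCS64.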
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma
add x5,x5,:lo12:Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b Loop_outer
Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ----------------------------------------------------------------------------
// mktmansour/MKT-KSA-Geolocation-Security (31,065 bytes)
// .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha-x86_64-elf.S
// ----------------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.section .rodata
.align 64
.Lzero:
.long 0,0,0,0
.Lone:
.long 1,0,0,0
.Linc:
.long 0,1,2,3
.Lfour:
.long 4,4,4,4
.Lincy:
.long 0,2,4,6,1,3,5,7
.Leight:
.long 8,8,8,8,8,8,8,8
.Lrot16:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.Lrot24:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
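// .Lrot16/.Lrot24 are pshufb masks: permuting the bytes of each dword gives
// the rotate-left-16 and rotate-left-8 (= rotate-right-24, hence the name)
// steps in one instruction; the 12- and 7-bit rotates below still need
// pslld/psrld/por sequences.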
.Lsigma:
.byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0
.align 64
.Lzeroz:
.long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
.Lfourz:
.long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
.Lincz:
.long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.Lsixteen:
.long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.globl ChaCha20_ctr32_nohw
.hidden ChaCha20_ctr32_nohw
.type ChaCha20_ctr32_nohw,@function
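// Scalar x86-64 path: rows a, b and d of the state live in eax-edx,
// r8d-r11d and r12d-r15d; row c sits on the stack with two words at a time
// cached in esi/edi and swapped between the half rounds. xmm1-xmm3 hold
// copies of the key halves and counter block for cheap per-block reload.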
.align 64
ChaCha20_ctr32_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset r15,-56
subq $64+24,%rsp
.cfi_adjust_cfa_offset 88
.Lctr32_body:
movdqu (%rcx),%xmm1
movdqu 16(%rcx),%xmm2
movdqu (%r8),%xmm3
movdqa .Lone(%rip),%xmm4
movdqa %xmm1,16(%rsp)
movdqa %xmm2,32(%rsp)
movdqa %xmm3,48(%rsp)
movq %rdx,%rbp
jmp .Loop_outer
.align 32
.Loop_outer:
movl $0x61707865,%eax
movl $0x3320646e,%ebx
movl $0x79622d32,%ecx
movl $0x6b206574,%edx
movl 16(%rsp),%r8d
movl 20(%rsp),%r9d
movl 24(%rsp),%r10d
movl 28(%rsp),%r11d
movd %xmm3,%r12d
movl 52(%rsp),%r13d
movl 56(%rsp),%r14d
movl 60(%rsp),%r15d
movq %rbp,64+0(%rsp)
movl $10,%ebp
movq %rsi,64+8(%rsp)
.byte 102,72,15,126,214
movq %rdi,64+16(%rsp)
movq %rsi,%rdi
shrq $32,%rdi
jmp .Loop
.align 32
.Loop:
addl %r8d,%eax
xorl %eax,%r12d
roll $16,%r12d
addl %r9d,%ebx
xorl %ebx,%r13d
roll $16,%r13d
addl %r12d,%esi
xorl %esi,%r8d
roll $12,%r8d
addl %r13d,%edi
xorl %edi,%r9d
roll $12,%r9d
addl %r8d,%eax
xorl %eax,%r12d
roll $8,%r12d
addl %r9d,%ebx
xorl %ebx,%r13d
roll $8,%r13d
addl %r12d,%esi
xorl %esi,%r8d
roll $7,%r8d
addl %r13d,%edi
xorl %edi,%r9d
roll $7,%r9d
movl %esi,32(%rsp)
movl %edi,36(%rsp)
movl 40(%rsp),%esi
movl 44(%rsp),%edi
addl %r10d,%ecx
xorl %ecx,%r14d
roll $16,%r14d
addl %r11d,%edx
xorl %edx,%r15d
roll $16,%r15d
addl %r14d,%esi
xorl %esi,%r10d
roll $12,%r10d
addl %r15d,%edi
xorl %edi,%r11d
roll $12,%r11d
addl %r10d,%ecx
xorl %ecx,%r14d
roll $8,%r14d
addl %r11d,%edx
xorl %edx,%r15d
roll $8,%r15d
addl %r14d,%esi
xorl %esi,%r10d
roll $7,%r10d
addl %r15d,%edi
xorl %edi,%r11d
roll $7,%r11d
addl %r9d,%eax
xorl %eax,%r15d
roll $16,%r15d
addl %r10d,%ebx
xorl %ebx,%r12d
roll $16,%r12d
addl %r15d,%esi
xorl %esi,%r9d
roll $12,%r9d
addl %r12d,%edi
xorl %edi,%r10d
roll $12,%r10d
addl %r9d,%eax
xorl %eax,%r15d
roll $8,%r15d
addl %r10d,%ebx
xorl %ebx,%r12d
roll $8,%r12d
addl %r15d,%esi
xorl %esi,%r9d
roll $7,%r9d
addl %r12d,%edi
xorl %edi,%r10d
roll $7,%r10d
movl %esi,40(%rsp)
movl %edi,44(%rsp)
movl 32(%rsp),%esi
movl 36(%rsp),%edi
addl %r11d,%ecx
xorl %ecx,%r13d
roll $16,%r13d
addl %r8d,%edx
xorl %edx,%r14d
roll $16,%r14d
addl %r13d,%esi
xorl %esi,%r11d
roll $12,%r11d
addl %r14d,%edi
xorl %edi,%r8d
roll $12,%r8d
addl %r11d,%ecx
xorl %ecx,%r13d
roll $8,%r13d
addl %r8d,%edx
xorl %edx,%r14d
roll $8,%r14d
addl %r13d,%esi
xorl %esi,%r11d
roll $7,%r11d
addl %r14d,%edi
xorl %edi,%r8d
roll $7,%r8d
decl %ebp
jnz .Loop
movl %edi,36(%rsp)
movl %esi,32(%rsp)
movq 64(%rsp),%rbp
movdqa %xmm2,%xmm1
movq 64+8(%rsp),%rsi
paddd %xmm4,%xmm3
movq 64+16(%rsp),%rdi
addl $0x61707865,%eax
addl $0x3320646e,%ebx
addl $0x79622d32,%ecx
addl $0x6b206574,%edx
addl 16(%rsp),%r8d
addl 20(%rsp),%r9d
addl 24(%rsp),%r10d
addl 28(%rsp),%r11d
addl 48(%rsp),%r12d
addl 52(%rsp),%r13d
addl 56(%rsp),%r14d
addl 60(%rsp),%r15d
paddd 32(%rsp),%xmm1
cmpq $64,%rbp
jb .Ltail
xorl 0(%rsi),%eax
xorl 4(%rsi),%ebx
xorl 8(%rsi),%ecx
xorl 12(%rsi),%edx
xorl 16(%rsi),%r8d
xorl 20(%rsi),%r9d
xorl 24(%rsi),%r10d
xorl 28(%rsi),%r11d
movdqu 32(%rsi),%xmm0
xorl 48(%rsi),%r12d
xorl 52(%rsi),%r13d
xorl 56(%rsi),%r14d
xorl 60(%rsi),%r15d
leaq 64(%rsi),%rsi
pxor %xmm1,%xmm0
movdqa %xmm2,32(%rsp)
movd %xmm3,48(%rsp)
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
movdqu %xmm0,32(%rdi)
movl %r12d,48(%rdi)
movl %r13d,52(%rdi)
movl %r14d,56(%rdi)
movl %r15d,60(%rdi)
leaq 64(%rdi),%rdi
subq $64,%rbp
jnz .Loop_outer
jmp .Ldone
.align 16
.Ltail:
movl %eax,0(%rsp)
movl %ebx,4(%rsp)
xorq %rbx,%rbx
movl %ecx,8(%rsp)
movl %edx,12(%rsp)
movl %r8d,16(%rsp)
movl %r9d,20(%rsp)
movl %r10d,24(%rsp)
movl %r11d,28(%rsp)
movdqa %xmm1,32(%rsp)
movl %r12d,48(%rsp)
movl %r13d,52(%rsp)
movl %r14d,56(%rsp)
movl %r15d,60(%rsp)
.Loop_tail:
movzbl (%rsi,%rbx,1),%eax
movzbl (%rsp,%rbx,1),%edx
leaq 1(%rbx),%rbx
xorl %edx,%eax
movb %al,-1(%rdi,%rbx,1)
decq %rbp
jnz .Loop_tail
.Ldone:
leaq 64+24+48(%rsp),%rsi
movq -48(%rsi),%r15
.cfi_restore r15
movq -40(%rsi),%r14
.cfi_restore r14
movq -32(%rsi),%r13
.cfi_restore r13
movq -24(%rsi),%r12
.cfi_restore r12
movq -16(%rsi),%rbp
.cfi_restore rbp
movq -8(%rsi),%rbx
.cfi_restore rbx
leaq (%rsi),%rsp
.cfi_adjust_cfa_offset -136
.Lno_data:
ret
.cfi_endproc
.size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw
.globl ChaCha20_ctr32_ssse3_4x
.hidden ChaCha20_ctr32_ssse3_4x
.type ChaCha20_ctr32_ssse3_4x,@function
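// ChaCha20_ctr32_ssse3_4x: four blocks (256 bytes) per iteration in a
// transposed layout -- each xmm register holds one state word replicated
// across the four blocks (pshufd broadcasts, with .Linc added to the counter
// word). Rotates by 16/8 use the pshufb tables via (%r10)/(%r11); after the
// rounds, punpck{l,h}{dq,qdq} re-transpose the registers into contiguous
// 64-byte blocks for the XOR with input.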
.align 32
ChaCha20_ctr32_ssse3_4x:
.cfi_startproc
_CET_ENDBR
movq %rsp,%r9
.cfi_def_cfa_register r9
subq $0x140+8,%rsp
movdqa .Lsigma(%rip),%xmm11
movdqu (%rcx),%xmm15
movdqu 16(%rcx),%xmm7
movdqu (%r8),%xmm3
leaq 256(%rsp),%rcx
leaq .Lrot16(%rip),%r10
leaq .Lrot24(%rip),%r11
pshufd $0x00,%xmm11,%xmm8
pshufd $0x55,%xmm11,%xmm9
movdqa %xmm8,64(%rsp)
pshufd $0xaa,%xmm11,%xmm10
movdqa %xmm9,80(%rsp)
pshufd $0xff,%xmm11,%xmm11
movdqa %xmm10,96(%rsp)
movdqa %xmm11,112(%rsp)
pshufd $0x00,%xmm15,%xmm12
pshufd $0x55,%xmm15,%xmm13
movdqa %xmm12,128-256(%rcx)
pshufd $0xaa,%xmm15,%xmm14
movdqa %xmm13,144-256(%rcx)
pshufd $0xff,%xmm15,%xmm15
movdqa %xmm14,160-256(%rcx)
movdqa %xmm15,176-256(%rcx)
pshufd $0x00,%xmm7,%xmm4
pshufd $0x55,%xmm7,%xmm5
movdqa %xmm4,192-256(%rcx)
pshufd $0xaa,%xmm7,%xmm6
movdqa %xmm5,208-256(%rcx)
pshufd $0xff,%xmm7,%xmm7
movdqa %xmm6,224-256(%rcx)
movdqa %xmm7,240-256(%rcx)
pshufd $0x00,%xmm3,%xmm0
pshufd $0x55,%xmm3,%xmm1
paddd .Linc(%rip),%xmm0
pshufd $0xaa,%xmm3,%xmm2
movdqa %xmm1,272-256(%rcx)
pshufd $0xff,%xmm3,%xmm3
movdqa %xmm2,288-256(%rcx)
movdqa %xmm3,304-256(%rcx)
jmp .Loop_enter4x
.align 32
.Loop_outer4x:
movdqa 64(%rsp),%xmm8
movdqa 80(%rsp),%xmm9
movdqa 96(%rsp),%xmm10
movdqa 112(%rsp),%xmm11
movdqa 128-256(%rcx),%xmm12
movdqa 144-256(%rcx),%xmm13
movdqa 160-256(%rcx),%xmm14
movdqa 176-256(%rcx),%xmm15
movdqa 192-256(%rcx),%xmm4
movdqa 208-256(%rcx),%xmm5
movdqa 224-256(%rcx),%xmm6
movdqa 240-256(%rcx),%xmm7
movdqa 256-256(%rcx),%xmm0
movdqa 272-256(%rcx),%xmm1
movdqa 288-256(%rcx),%xmm2
movdqa 304-256(%rcx),%xmm3
paddd .Lfour(%rip),%xmm0
.Loop_enter4x:
movdqa %xmm6,32(%rsp)
movdqa %xmm7,48(%rsp)
movdqa (%r10),%xmm7
movl $10,%eax
movdqa %xmm0,256-256(%rcx)
jmp .Loop4x
.align 32
.Loop4x:
paddd %xmm12,%xmm8
paddd %xmm13,%xmm9
pxor %xmm8,%xmm0
pxor %xmm9,%xmm1
.byte 102,15,56,0,199
.byte 102,15,56,0,207
paddd %xmm0,%xmm4
paddd %xmm1,%xmm5
pxor %xmm4,%xmm12
pxor %xmm5,%xmm13
movdqa %xmm12,%xmm6
pslld $12,%xmm12
psrld $20,%xmm6
movdqa %xmm13,%xmm7
pslld $12,%xmm13
por %xmm6,%xmm12
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm13
paddd %xmm12,%xmm8
paddd %xmm13,%xmm9
pxor %xmm8,%xmm0
pxor %xmm9,%xmm1
.byte 102,15,56,0,198
.byte 102,15,56,0,206
paddd %xmm0,%xmm4
paddd %xmm1,%xmm5
pxor %xmm4,%xmm12
pxor %xmm5,%xmm13
movdqa %xmm12,%xmm7
pslld $7,%xmm12
psrld $25,%xmm7
movdqa %xmm13,%xmm6
pslld $7,%xmm13
por %xmm7,%xmm12
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm13
movdqa %xmm4,0(%rsp)
movdqa %xmm5,16(%rsp)
movdqa 32(%rsp),%xmm4
movdqa 48(%rsp),%xmm5
paddd %xmm14,%xmm10
paddd %xmm15,%xmm11
pxor %xmm10,%xmm2
pxor %xmm11,%xmm3
.byte 102,15,56,0,215
.byte 102,15,56,0,223
paddd %xmm2,%xmm4
paddd %xmm3,%xmm5
pxor %xmm4,%xmm14
pxor %xmm5,%xmm15
movdqa %xmm14,%xmm6
pslld $12,%xmm14
psrld $20,%xmm6
movdqa %xmm15,%xmm7
pslld $12,%xmm15
por %xmm6,%xmm14
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm15
paddd %xmm14,%xmm10
paddd %xmm15,%xmm11
pxor %xmm10,%xmm2
pxor %xmm11,%xmm3
.byte 102,15,56,0,214
.byte 102,15,56,0,222
paddd %xmm2,%xmm4
paddd %xmm3,%xmm5
pxor %xmm4,%xmm14
pxor %xmm5,%xmm15
movdqa %xmm14,%xmm7
pslld $7,%xmm14
psrld $25,%xmm7
movdqa %xmm15,%xmm6
pslld $7,%xmm15
por %xmm7,%xmm14
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm15
paddd %xmm13,%xmm8
paddd %xmm14,%xmm9
pxor %xmm8,%xmm3
pxor %xmm9,%xmm0
.byte 102,15,56,0,223
.byte 102,15,56,0,199
paddd %xmm3,%xmm4
paddd %xmm0,%xmm5
pxor %xmm4,%xmm13
pxor %xmm5,%xmm14
movdqa %xmm13,%xmm6
pslld $12,%xmm13
psrld $20,%xmm6
movdqa %xmm14,%xmm7
pslld $12,%xmm14
por %xmm6,%xmm13
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm14
paddd %xmm13,%xmm8
paddd %xmm14,%xmm9
pxor %xmm8,%xmm3
pxor %xmm9,%xmm0
.byte 102,15,56,0,222
.byte 102,15,56,0,198
paddd %xmm3,%xmm4
paddd %xmm0,%xmm5
pxor %xmm4,%xmm13
pxor %xmm5,%xmm14
movdqa %xmm13,%xmm7
pslld $7,%xmm13
psrld $25,%xmm7
movdqa %xmm14,%xmm6
pslld $7,%xmm14
por %xmm7,%xmm13
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm14
movdqa %xmm4,32(%rsp)
movdqa %xmm5,48(%rsp)
movdqa 0(%rsp),%xmm4
movdqa 16(%rsp),%xmm5
paddd %xmm15,%xmm10
paddd %xmm12,%xmm11
pxor %xmm10,%xmm1
pxor %xmm11,%xmm2
.byte 102,15,56,0,207
.byte 102,15,56,0,215
paddd %xmm1,%xmm4
paddd %xmm2,%xmm5
pxor %xmm4,%xmm15
pxor %xmm5,%xmm12
movdqa %xmm15,%xmm6
pslld $12,%xmm15
psrld $20,%xmm6
movdqa %xmm12,%xmm7
pslld $12,%xmm12
por %xmm6,%xmm15
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm12
paddd %xmm15,%xmm10
paddd %xmm12,%xmm11
pxor %xmm10,%xmm1
pxor %xmm11,%xmm2
.byte 102,15,56,0,206
.byte 102,15,56,0,214
paddd %xmm1,%xmm4
paddd %xmm2,%xmm5
pxor %xmm4,%xmm15
pxor %xmm5,%xmm12
movdqa %xmm15,%xmm7
pslld $7,%xmm15
psrld $25,%xmm7
movdqa %xmm12,%xmm6
pslld $7,%xmm12
por %xmm7,%xmm15
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm12
decl %eax
jnz .Loop4x
paddd 64(%rsp),%xmm8
paddd 80(%rsp),%xmm9
paddd 96(%rsp),%xmm10
paddd 112(%rsp),%xmm11
movdqa %xmm8,%xmm6
punpckldq %xmm9,%xmm8
movdqa %xmm10,%xmm7
punpckldq %xmm11,%xmm10
punpckhdq %xmm9,%xmm6
punpckhdq %xmm11,%xmm7
movdqa %xmm8,%xmm9
punpcklqdq %xmm10,%xmm8
movdqa %xmm6,%xmm11
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm10,%xmm9
punpckhqdq %xmm7,%xmm11
paddd 128-256(%rcx),%xmm12
paddd 144-256(%rcx),%xmm13
paddd 160-256(%rcx),%xmm14
paddd 176-256(%rcx),%xmm15
movdqa %xmm8,0(%rsp)
movdqa %xmm9,16(%rsp)
movdqa 32(%rsp),%xmm8
movdqa 48(%rsp),%xmm9
movdqa %xmm12,%xmm10
punpckldq %xmm13,%xmm12
movdqa %xmm14,%xmm7
punpckldq %xmm15,%xmm14
punpckhdq %xmm13,%xmm10
punpckhdq %xmm15,%xmm7
movdqa %xmm12,%xmm13
punpcklqdq %xmm14,%xmm12
movdqa %xmm10,%xmm15
punpcklqdq %xmm7,%xmm10
punpckhqdq %xmm14,%xmm13
punpckhqdq %xmm7,%xmm15
paddd 192-256(%rcx),%xmm4
paddd 208-256(%rcx),%xmm5
paddd 224-256(%rcx),%xmm8
paddd 240-256(%rcx),%xmm9
movdqa %xmm6,32(%rsp)
movdqa %xmm11,48(%rsp)
movdqa %xmm4,%xmm14
punpckldq %xmm5,%xmm4
movdqa %xmm8,%xmm7
punpckldq %xmm9,%xmm8
punpckhdq %xmm5,%xmm14
punpckhdq %xmm9,%xmm7
movdqa %xmm4,%xmm5
punpcklqdq %xmm8,%xmm4
movdqa %xmm14,%xmm9
punpcklqdq %xmm7,%xmm14
punpckhqdq %xmm8,%xmm5
punpckhqdq %xmm7,%xmm9
paddd 256-256(%rcx),%xmm0
paddd 272-256(%rcx),%xmm1
paddd 288-256(%rcx),%xmm2
paddd 304-256(%rcx),%xmm3
movdqa %xmm0,%xmm8
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm8
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm8,%xmm3
punpcklqdq %xmm7,%xmm8
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
cmpq $256,%rdx
jb .Ltail4x
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
leaq 128(%rsi),%rsi
pxor 16(%rsp),%xmm6
pxor %xmm13,%xmm11
pxor %xmm5,%xmm2
pxor %xmm1,%xmm7
movdqu %xmm6,64(%rdi)
movdqu 0(%rsi),%xmm6
movdqu %xmm11,80(%rdi)
movdqu 16(%rsi),%xmm11
movdqu %xmm2,96(%rdi)
movdqu 32(%rsi),%xmm2
movdqu %xmm7,112(%rdi)
leaq 128(%rdi),%rdi
movdqu 48(%rsi),%xmm7
pxor 32(%rsp),%xmm6
pxor %xmm10,%xmm11
pxor %xmm14,%xmm2
pxor %xmm8,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
leaq 128(%rsi),%rsi
pxor 48(%rsp),%xmm6
pxor %xmm15,%xmm11
pxor %xmm9,%xmm2
pxor %xmm3,%xmm7
movdqu %xmm6,64(%rdi)
movdqu %xmm11,80(%rdi)
movdqu %xmm2,96(%rdi)
movdqu %xmm7,112(%rdi)
leaq 128(%rdi),%rdi
subq $256,%rdx
jnz .Loop_outer4x
jmp .Ldone4x
.Ltail4x:
cmpq $192,%rdx
jae .L192_or_more4x
cmpq $128,%rdx
jae .L128_or_more4x
cmpq $64,%rdx
jae .L64_or_more4x
xorq %r10,%r10
movdqa %xmm12,16(%rsp)
movdqa %xmm4,32(%rsp)
movdqa %xmm0,48(%rsp)
jmp .Loop_tail4x
.align 32
.L64_or_more4x:
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu %xmm11,16(%rdi)
movdqu %xmm2,32(%rdi)
movdqu %xmm7,48(%rdi)
je .Ldone4x
movdqa 16(%rsp),%xmm6
leaq 64(%rsi),%rsi
xorq %r10,%r10
movdqa %xmm6,0(%rsp)
movdqa %xmm13,16(%rsp)
leaq 64(%rdi),%rdi
movdqa %xmm5,32(%rsp)
subq $64,%rdx
movdqa %xmm1,48(%rsp)
jmp .Loop_tail4x
.align 32
.L128_or_more4x:
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
pxor 16(%rsp),%xmm6
pxor %xmm13,%xmm11
pxor %xmm5,%xmm2
pxor %xmm1,%xmm7
movdqu %xmm6,64(%rdi)
movdqu %xmm11,80(%rdi)
movdqu %xmm2,96(%rdi)
movdqu %xmm7,112(%rdi)
je .Ldone4x
movdqa 32(%rsp),%xmm6
leaq 128(%rsi),%rsi
xorq %r10,%r10
movdqa %xmm6,0(%rsp)
movdqa %xmm10,16(%rsp)
leaq 128(%rdi),%rdi
movdqa %xmm14,32(%rsp)
subq $128,%rdx
movdqa %xmm8,48(%rsp)
jmp .Loop_tail4x
.align 32
.L192_or_more4x:
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
leaq 128(%rsi),%rsi
pxor 16(%rsp),%xmm6
pxor %xmm13,%xmm11
pxor %xmm5,%xmm2
pxor %xmm1,%xmm7
movdqu %xmm6,64(%rdi)
movdqu 0(%rsi),%xmm6
movdqu %xmm11,80(%rdi)
movdqu 16(%rsi),%xmm11
movdqu %xmm2,96(%rdi)
movdqu 32(%rsi),%xmm2
movdqu %xmm7,112(%rdi)
leaq 128(%rdi),%rdi
movdqu 48(%rsi),%xmm7
pxor 32(%rsp),%xmm6
pxor %xmm10,%xmm11
pxor %xmm14,%xmm2
pxor %xmm8,%xmm7
movdqu %xmm6,0(%rdi)
movdqu %xmm11,16(%rdi)
movdqu %xmm2,32(%rdi)
movdqu %xmm7,48(%rdi)
je .Ldone4x
movdqa 48(%rsp),%xmm6
leaq 64(%rsi),%rsi
xorq %r10,%r10
movdqa %xmm6,0(%rsp)
movdqa %xmm15,16(%rsp)
leaq 64(%rdi),%rdi
movdqa %xmm9,32(%rsp)
subq $192,%rdx
movdqa %xmm3,48(%rsp)
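// Byte-at-a-time tail: the remaining keystream has been staged at (%rsp),
// and the loop below XORs it into the final partial block, with %r10
// indexing both source and keystream.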
.Loop_tail4x:
movzbl (%rsi,%r10,1),%eax
movzbl (%rsp,%r10,1),%ecx
leaq 1(%r10),%r10
xorl %ecx,%eax
movb %al,-1(%rdi,%r10,1)
decq %rdx
jnz .Loop_tail4x
.Ldone4x:
leaq (%r9),%rsp
.cfi_def_cfa_register rsp
.L4x_epilogue:
ret
.cfi_endproc
.size ChaCha20_ctr32_ssse3_4x,.-ChaCha20_ctr32_ssse3_4x
.globl ChaCha20_ctr32_avx2
.hidden ChaCha20_ctr32_avx2
.type ChaCha20_ctr32_avx2,@function
.align 32
ChaCha20_ctr32_avx2:
.cfi_startproc
_CET_ENDBR
movq %rsp,%r9
.cfi_def_cfa_register r9
subq $0x280+8,%rsp
andq $-32,%rsp
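// 8-way AVX2 path: eight ChaCha20 blocks (512 bytes) are produced per outer
// iteration, with the state splatted lane-wise into the 0x280-byte scratch
// area reserved above (%rsp is 32-byte aligned for the vmovdqa stores).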
vzeroupper
vbroadcasti128 .Lsigma(%rip),%ymm11
vbroadcasti128 (%rcx),%ymm3
vbroadcasti128 16(%rcx),%ymm15
vbroadcasti128 (%r8),%ymm7
leaq 256(%rsp),%rcx
leaq 512(%rsp),%rax
leaq .Lrot16(%rip),%r10
leaq .Lrot24(%rip),%r11
vpshufd $0x00,%ymm11,%ymm8
vpshufd $0x55,%ymm11,%ymm9
vmovdqa %ymm8,128-256(%rcx)
vpshufd $0xaa,%ymm11,%ymm10
vmovdqa %ymm9,160-256(%rcx)
vpshufd $0xff,%ymm11,%ymm11
vmovdqa %ymm10,192-256(%rcx)
vmovdqa %ymm11,224-256(%rcx)
vpshufd $0x00,%ymm3,%ymm0
vpshufd $0x55,%ymm3,%ymm1
vmovdqa %ymm0,256-256(%rcx)
vpshufd $0xaa,%ymm3,%ymm2
vmovdqa %ymm1,288-256(%rcx)
vpshufd $0xff,%ymm3,%ymm3
vmovdqa %ymm2,320-256(%rcx)
vmovdqa %ymm3,352-256(%rcx)
vpshufd $0x00,%ymm15,%ymm12
vpshufd $0x55,%ymm15,%ymm13
vmovdqa %ymm12,384-512(%rax)
vpshufd $0xaa,%ymm15,%ymm14
vmovdqa %ymm13,416-512(%rax)
vpshufd $0xff,%ymm15,%ymm15
vmovdqa %ymm14,448-512(%rax)
vmovdqa %ymm15,480-512(%rax)
vpshufd $0x00,%ymm7,%ymm4
vpshufd $0x55,%ymm7,%ymm5
vpaddd .Lincy(%rip),%ymm4,%ymm4
vpshufd $0xaa,%ymm7,%ymm6
vmovdqa %ymm5,544-512(%rax)
vpshufd $0xff,%ymm7,%ymm7
vmovdqa %ymm6,576-512(%rax)
vmovdqa %ymm7,608-512(%rax)
jmp .Loop_enter8x
.align 32
.Loop_outer8x:
vmovdqa 128-256(%rcx),%ymm8
vmovdqa 160-256(%rcx),%ymm9
vmovdqa 192-256(%rcx),%ymm10
vmovdqa 224-256(%rcx),%ymm11
vmovdqa 256-256(%rcx),%ymm0
vmovdqa 288-256(%rcx),%ymm1
vmovdqa 320-256(%rcx),%ymm2
vmovdqa 352-256(%rcx),%ymm3
vmovdqa 384-512(%rax),%ymm12
vmovdqa 416-512(%rax),%ymm13
vmovdqa 448-512(%rax),%ymm14
vmovdqa 480-512(%rax),%ymm15
vmovdqa 512-512(%rax),%ymm4
vmovdqa 544-512(%rax),%ymm5
vmovdqa 576-512(%rax),%ymm6
vmovdqa 608-512(%rax),%ymm7
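// .Leight is presumably a vector of eight 32-bit 8s: each outer iteration
// advances all eight per-lane block counters past the blocks just produced.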
vpaddd .Leight(%rip),%ymm4,%ymm4
.Loop_enter8x:
vmovdqa %ymm14,64(%rsp)
vmovdqa %ymm15,96(%rsp)
vbroadcasti128 (%r10),%ymm15
vmovdqa %ymm4,512-512(%rax)
movl $10,%eax
jmp .Loop8x
.align 32
.Loop8x:
vpaddd %ymm0,%ymm8,%ymm8
vpxor %ymm4,%ymm8,%ymm4
vpshufb %ymm15,%ymm4,%ymm4
vpaddd %ymm1,%ymm9,%ymm9
vpxor %ymm5,%ymm9,%ymm5
vpshufb %ymm15,%ymm5,%ymm5
vpaddd %ymm4,%ymm12,%ymm12
vpxor %ymm0,%ymm12,%ymm0
vpslld $12,%ymm0,%ymm14
vpsrld $20,%ymm0,%ymm0
vpor %ymm0,%ymm14,%ymm0
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm13,%ymm1
vpslld $12,%ymm1,%ymm15
vpsrld $20,%ymm1,%ymm1
vpor %ymm1,%ymm15,%ymm1
vpaddd %ymm0,%ymm8,%ymm8
vpxor %ymm4,%ymm8,%ymm4
vpshufb %ymm14,%ymm4,%ymm4
vpaddd %ymm1,%ymm9,%ymm9
vpxor %ymm5,%ymm9,%ymm5
vpshufb %ymm14,%ymm5,%ymm5
vpaddd %ymm4,%ymm12,%ymm12
vpxor %ymm0,%ymm12,%ymm0
vpslld $7,%ymm0,%ymm15
vpsrld $25,%ymm0,%ymm0
vpor %ymm0,%ymm15,%ymm0
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm13,%ymm1
vpslld $7,%ymm1,%ymm14
vpsrld $25,%ymm1,%ymm1
vpor %ymm1,%ymm14,%ymm1
vmovdqa %ymm12,0(%rsp)
vmovdqa %ymm13,32(%rsp)
vmovdqa 64(%rsp),%ymm12
vmovdqa 96(%rsp),%ymm13
vpaddd %ymm2,%ymm10,%ymm10
vpxor %ymm6,%ymm10,%ymm6
vpshufb %ymm15,%ymm6,%ymm6
vpaddd %ymm3,%ymm11,%ymm11
vpxor %ymm7,%ymm11,%ymm7
vpshufb %ymm15,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm2,%ymm12,%ymm2
vpslld $12,%ymm2,%ymm14
vpsrld $20,%ymm2,%ymm2
vpor %ymm2,%ymm14,%ymm2
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm7,%ymm13,%ymm13
vpxor %ymm3,%ymm13,%ymm3
vpslld $12,%ymm3,%ymm15
vpsrld $20,%ymm3,%ymm3
vpor %ymm3,%ymm15,%ymm3
vpaddd %ymm2,%ymm10,%ymm10
vpxor %ymm6,%ymm10,%ymm6
vpshufb %ymm14,%ymm6,%ymm6
vpaddd %ymm3,%ymm11,%ymm11
vpxor %ymm7,%ymm11,%ymm7
vpshufb %ymm14,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm2,%ymm12,%ymm2
vpslld $7,%ymm2,%ymm15
vpsrld $25,%ymm2,%ymm2
vpor %ymm2,%ymm15,%ymm2
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm7,%ymm13,%ymm13
vpxor %ymm3,%ymm13,%ymm3
vpslld $7,%ymm3,%ymm14
vpsrld $25,%ymm3,%ymm3
vpor %ymm3,%ymm14,%ymm3
vpaddd %ymm1,%ymm8,%ymm8
vpxor %ymm7,%ymm8,%ymm7
vpshufb %ymm15,%ymm7,%ymm7
vpaddd %ymm2,%ymm9,%ymm9
vpxor %ymm4,%ymm9,%ymm4
vpshufb %ymm15,%ymm4,%ymm4
vpaddd %ymm7,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm1
vpslld $12,%ymm1,%ymm14
vpsrld $20,%ymm1,%ymm1
vpor %ymm1,%ymm14,%ymm1
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm4,%ymm13,%ymm13
vpxor %ymm2,%ymm13,%ymm2
vpslld $12,%ymm2,%ymm15
vpsrld $20,%ymm2,%ymm2
vpor %ymm2,%ymm15,%ymm2
vpaddd %ymm1,%ymm8,%ymm8
vpxor %ymm7,%ymm8,%ymm7
vpshufb %ymm14,%ymm7,%ymm7
vpaddd %ymm2,%ymm9,%ymm9
vpxor %ymm4,%ymm9,%ymm4
vpshufb %ymm14,%ymm4,%ymm4
vpaddd %ymm7,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm1
vpslld $7,%ymm1,%ymm15
vpsrld $25,%ymm1,%ymm1
vpor %ymm1,%ymm15,%ymm1
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm4,%ymm13,%ymm13
vpxor %ymm2,%ymm13,%ymm2
vpslld $7,%ymm2,%ymm14
vpsrld $25,%ymm2,%ymm2
vpor %ymm2,%ymm14,%ymm2
vmovdqa %ymm12,64(%rsp)
vmovdqa %ymm13,96(%rsp)
vmovdqa 0(%rsp),%ymm12
vmovdqa 32(%rsp),%ymm13
vpaddd %ymm3,%ymm10,%ymm10
vpxor %ymm5,%ymm10,%ymm5
vpshufb %ymm15,%ymm5,%ymm5
vpaddd %ymm0,%ymm11,%ymm11
vpxor %ymm6,%ymm11,%ymm6
vpshufb %ymm15,%ymm6,%ymm6
vpaddd %ymm5,%ymm12,%ymm12
vpxor %ymm3,%ymm12,%ymm3
vpslld $12,%ymm3,%ymm14
vpsrld $20,%ymm3,%ymm3
vpor %ymm3,%ymm14,%ymm3
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm6,%ymm13,%ymm13
vpxor %ymm0,%ymm13,%ymm0
vpslld $12,%ymm0,%ymm15
vpsrld $20,%ymm0,%ymm0
vpor %ymm0,%ymm15,%ymm0
vpaddd %ymm3,%ymm10,%ymm10
vpxor %ymm5,%ymm10,%ymm5
vpshufb %ymm14,%ymm5,%ymm5
vpaddd %ymm0,%ymm11,%ymm11
vpxor %ymm6,%ymm11,%ymm6
vpshufb %ymm14,%ymm6,%ymm6
vpaddd %ymm5,%ymm12,%ymm12
vpxor %ymm3,%ymm12,%ymm3
vpslld $7,%ymm3,%ymm15
vpsrld $25,%ymm3,%ymm3
vpor %ymm3,%ymm15,%ymm3
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm6,%ymm13,%ymm13
vpxor %ymm0,%ymm13,%ymm0
vpslld $7,%ymm0,%ymm14
vpsrld $25,%ymm0,%ymm0
vpor %ymm0,%ymm14,%ymm0
decl %eax
jnz .Loop8x
leaq 512(%rsp),%rax
vpaddd 128-256(%rcx),%ymm8,%ymm8
vpaddd 160-256(%rcx),%ymm9,%ymm9
vpaddd 192-256(%rcx),%ymm10,%ymm10
vpaddd 224-256(%rcx),%ymm11,%ymm11
vpunpckldq %ymm9,%ymm8,%ymm14
vpunpckldq %ymm11,%ymm10,%ymm15
vpunpckhdq %ymm9,%ymm8,%ymm8
vpunpckhdq %ymm11,%ymm10,%ymm10
vpunpcklqdq %ymm15,%ymm14,%ymm9
vpunpckhqdq %ymm15,%ymm14,%ymm14
vpunpcklqdq %ymm10,%ymm8,%ymm11
vpunpckhqdq %ymm10,%ymm8,%ymm8
vpaddd 256-256(%rcx),%ymm0,%ymm0
vpaddd 288-256(%rcx),%ymm1,%ymm1
vpaddd 320-256(%rcx),%ymm2,%ymm2
vpaddd 352-256(%rcx),%ymm3,%ymm3
vpunpckldq %ymm1,%ymm0,%ymm10
vpunpckldq %ymm3,%ymm2,%ymm15
vpunpckhdq %ymm1,%ymm0,%ymm0
vpunpckhdq %ymm3,%ymm2,%ymm2
vpunpcklqdq %ymm15,%ymm10,%ymm1
vpunpckhqdq %ymm15,%ymm10,%ymm10
vpunpcklqdq %ymm2,%ymm0,%ymm3
vpunpckhqdq %ymm2,%ymm0,%ymm0
vperm2i128 $0x20,%ymm1,%ymm9,%ymm15
vperm2i128 $0x31,%ymm1,%ymm9,%ymm1
vperm2i128 $0x20,%ymm10,%ymm14,%ymm9
vperm2i128 $0x31,%ymm10,%ymm14,%ymm10
vperm2i128 $0x20,%ymm3,%ymm11,%ymm14
vperm2i128 $0x31,%ymm3,%ymm11,%ymm3
vperm2i128 $0x20,%ymm0,%ymm8,%ymm11
vperm2i128 $0x31,%ymm0,%ymm8,%ymm0
vmovdqa %ymm15,0(%rsp)
vmovdqa %ymm9,32(%rsp)
vmovdqa 64(%rsp),%ymm15
vmovdqa 96(%rsp),%ymm9
vpaddd 384-512(%rax),%ymm12,%ymm12
vpaddd 416-512(%rax),%ymm13,%ymm13
vpaddd 448-512(%rax),%ymm15,%ymm15
vpaddd 480-512(%rax),%ymm9,%ymm9
vpunpckldq %ymm13,%ymm12,%ymm2
vpunpckldq %ymm9,%ymm15,%ymm8
vpunpckhdq %ymm13,%ymm12,%ymm12
vpunpckhdq %ymm9,%ymm15,%ymm15
vpunpcklqdq %ymm8,%ymm2,%ymm13
vpunpckhqdq %ymm8,%ymm2,%ymm2
vpunpcklqdq %ymm15,%ymm12,%ymm9
vpunpckhqdq %ymm15,%ymm12,%ymm12
vpaddd 512-512(%rax),%ymm4,%ymm4
vpaddd 544-512(%rax),%ymm5,%ymm5
vpaddd 576-512(%rax),%ymm6,%ymm6
vpaddd 608-512(%rax),%ymm7,%ymm7
vpunpckldq %ymm5,%ymm4,%ymm15
vpunpckldq %ymm7,%ymm6,%ymm8
vpunpckhdq %ymm5,%ymm4,%ymm4
vpunpckhdq %ymm7,%ymm6,%ymm6
vpunpcklqdq %ymm8,%ymm15,%ymm5
vpunpckhqdq %ymm8,%ymm15,%ymm15
vpunpcklqdq %ymm6,%ymm4,%ymm7
vpunpckhqdq %ymm6,%ymm4,%ymm4
vperm2i128 $0x20,%ymm5,%ymm13,%ymm8
vperm2i128 $0x31,%ymm5,%ymm13,%ymm5
vperm2i128 $0x20,%ymm15,%ymm2,%ymm13
vperm2i128 $0x31,%ymm15,%ymm2,%ymm15
vperm2i128 $0x20,%ymm7,%ymm9,%ymm2
vperm2i128 $0x31,%ymm7,%ymm9,%ymm7
vperm2i128 $0x20,%ymm4,%ymm12,%ymm9
vperm2i128 $0x31,%ymm4,%ymm12,%ymm4
vmovdqa 0(%rsp),%ymm6
vmovdqa 32(%rsp),%ymm12
cmpq $512,%rdx
jb .Ltail8x
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
leaq 128(%rsi),%rsi
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
leaq 128(%rdi),%rdi
vpxor 0(%rsi),%ymm12,%ymm12
vpxor 32(%rsi),%ymm13,%ymm13
vpxor 64(%rsi),%ymm10,%ymm10
vpxor 96(%rsi),%ymm15,%ymm15
leaq 128(%rsi),%rsi
vmovdqu %ymm12,0(%rdi)
vmovdqu %ymm13,32(%rdi)
vmovdqu %ymm10,64(%rdi)
vmovdqu %ymm15,96(%rdi)
leaq 128(%rdi),%rdi
vpxor 0(%rsi),%ymm14,%ymm14
vpxor 32(%rsi),%ymm2,%ymm2
vpxor 64(%rsi),%ymm3,%ymm3
vpxor 96(%rsi),%ymm7,%ymm7
leaq 128(%rsi),%rsi
vmovdqu %ymm14,0(%rdi)
vmovdqu %ymm2,32(%rdi)
vmovdqu %ymm3,64(%rdi)
vmovdqu %ymm7,96(%rdi)
leaq 128(%rdi),%rdi
vpxor 0(%rsi),%ymm11,%ymm11
vpxor 32(%rsi),%ymm9,%ymm9
vpxor 64(%rsi),%ymm0,%ymm0
vpxor 96(%rsi),%ymm4,%ymm4
leaq 128(%rsi),%rsi
vmovdqu %ymm11,0(%rdi)
vmovdqu %ymm9,32(%rdi)
vmovdqu %ymm0,64(%rdi)
vmovdqu %ymm4,96(%rdi)
leaq 128(%rdi),%rdi
subq $512,%rdx
jnz .Loop_outer8x
jmp .Ldone8x
.Ltail8x:
cmpq $448,%rdx
jae .L448_or_more8x
cmpq $384,%rdx
jae .L384_or_more8x
cmpq $320,%rdx
jae .L320_or_more8x
cmpq $256,%rdx
jae .L256_or_more8x
cmpq $192,%rdx
jae .L192_or_more8x
cmpq $128,%rdx
jae .L128_or_more8x
cmpq $64,%rdx
jae .L64_or_more8x
xorq %r10,%r10
vmovdqa %ymm6,0(%rsp)
vmovdqa %ymm8,32(%rsp)
jmp .Loop_tail8x
.align 32
.L64_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
je .Ldone8x
leaq 64(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm1,0(%rsp)
leaq 64(%rdi),%rdi
subq $64,%rdx
vmovdqa %ymm5,32(%rsp)
jmp .Loop_tail8x
.align 32
.L128_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
je .Ldone8x
leaq 128(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm12,0(%rsp)
leaq 128(%rdi),%rdi
subq $128,%rdx
vmovdqa %ymm13,32(%rsp)
jmp .Loop_tail8x
.align 32
.L192_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
je .Ldone8x
leaq 192(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm10,0(%rsp)
leaq 192(%rdi),%rdi
subq $192,%rdx
vmovdqa %ymm15,32(%rsp)
jmp .Loop_tail8x
.align 32
.L256_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
je .Ldone8x
leaq 256(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm14,0(%rsp)
leaq 256(%rdi),%rdi
subq $256,%rdx
vmovdqa %ymm2,32(%rsp)
jmp .Loop_tail8x
.align 32
.L320_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vpxor 256(%rsi),%ymm14,%ymm14
vpxor 288(%rsi),%ymm2,%ymm2
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
vmovdqu %ymm14,256(%rdi)
vmovdqu %ymm2,288(%rdi)
je .Ldone8x
leaq 320(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm3,0(%rsp)
leaq 320(%rdi),%rdi
subq $320,%rdx
vmovdqa %ymm7,32(%rsp)
jmp .Loop_tail8x
.align 32
.L384_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vpxor 256(%rsi),%ymm14,%ymm14
vpxor 288(%rsi),%ymm2,%ymm2
vpxor 320(%rsi),%ymm3,%ymm3
vpxor 352(%rsi),%ymm7,%ymm7
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
vmovdqu %ymm14,256(%rdi)
vmovdqu %ymm2,288(%rdi)
vmovdqu %ymm3,320(%rdi)
vmovdqu %ymm7,352(%rdi)
je .Ldone8x
leaq 384(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm11,0(%rsp)
leaq 384(%rdi),%rdi
subq $384,%rdx
vmovdqa %ymm9,32(%rsp)
jmp .Loop_tail8x
.align 32
.L448_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vpxor 256(%rsi),%ymm14,%ymm14
vpxor 288(%rsi),%ymm2,%ymm2
vpxor 320(%rsi),%ymm3,%ymm3
vpxor 352(%rsi),%ymm7,%ymm7
vpxor 384(%rsi),%ymm11,%ymm11
vpxor 416(%rsi),%ymm9,%ymm9
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
vmovdqu %ymm14,256(%rdi)
vmovdqu %ymm2,288(%rdi)
vmovdqu %ymm3,320(%rdi)
vmovdqu %ymm7,352(%rdi)
vmovdqu %ymm11,384(%rdi)
vmovdqu %ymm9,416(%rdi)
je .Ldone8x
leaq 448(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm0,0(%rsp)
leaq 448(%rdi),%rdi
subq $448,%rdx
vmovdqa %ymm4,32(%rsp)
.Loop_tail8x:
movzbl (%rsi,%r10,1),%eax
movzbl (%rsp,%r10,1),%ecx
leaq 1(%r10),%r10
xorl %ecx,%eax
movb %al,-1(%rdi,%r10,1)
decq %rdx
jnz .Loop_tail8x
.Ldone8x:
vzeroall
leaq (%r9),%rsp
.cfi_def_cfa_register rsp
.L8x_epilogue:
ret
.cfi_endproc
.size ChaCha20_ctr32_avx2,.-ChaCha20_ctr32_avx2
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesni-gcm-x86_64-elf.S
mktmansour/MKT-KSA-Geolocation-Security | 19,660
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.type _aesni_ctr32_ghash_6x,@function
.align 32
_aesni_ctr32_ghash_6x:
.cfi_startproc
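// Inner loop: six AES-CTR blocks (96 bytes) are encrypted per iteration
// while GHASH over the six previous ciphertext blocks is folded in between
// the vaesenc steps; %rdx appears to count 16-byte blocks and %rax the
// bytes processed so far.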
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
vmovdqu 0-128(%rcx),%xmm15
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpaddb %xmm2,%xmm11,%xmm12
vpaddb %xmm2,%xmm12,%xmm13
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm15,%xmm1,%xmm9
vmovdqu %xmm4,16+8(%rsp)
jmp .Loop6x
.align 32
.Loop6x:
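// 100663296 == 6<<24: the counter's big-endian low byte sits in the top
// byte of %ebx, so this add carries exactly when incrementing the counter
// by 6 would wrap that byte, in which case .Lhandle_ctr32 redoes the
// increments with byte-swapped 32-bit vpaddd instead of the fast vpaddb
// path.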
addl $100663296,%ebx
jc .Lhandle_ctr32
vmovdqu 0-32(%r9),%xmm3
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm15,%xmm10,%xmm10
vpxor %xmm15,%xmm11,%xmm11
.Lresume_ctr32:
vmovdqu %xmm1,(%r8)
vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5
vpxor %xmm15,%xmm12,%xmm12
vmovups 16-128(%rcx),%xmm2
vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6
xorq %r12,%r12
cmpq %r14,%r15
vaesenc %xmm2,%xmm9,%xmm9
vmovdqu 48+8(%rsp),%xmm0
vpxor %xmm15,%xmm13,%xmm13
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1
vaesenc %xmm2,%xmm10,%xmm10
vpxor %xmm15,%xmm14,%xmm14
setnc %r12b
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vmovdqu 16-32(%r9),%xmm3
negq %r12
vaesenc %xmm2,%xmm12,%xmm12
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5
vpxor %xmm4,%xmm8,%xmm8
vaesenc %xmm2,%xmm13,%xmm13
vpxor %xmm5,%xmm1,%xmm4
andq $0x60,%r12
vmovups 32-128(%rcx),%xmm15
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1
vaesenc %xmm2,%xmm14,%xmm14
vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2
leaq (%r14,%r12,1),%r14
vaesenc %xmm15,%xmm9,%xmm9
vpxor 16+8(%rsp),%xmm8,%xmm8
vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3
vmovdqu 64+8(%rsp),%xmm0
vaesenc %xmm15,%xmm10,%xmm10
movbeq 88(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 80(%r14),%r12
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,32+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,40+8(%rsp)
vmovdqu 48-32(%r9),%xmm5
vaesenc %xmm15,%xmm14,%xmm14
vmovups 48-128(%rcx),%xmm15
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm3,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3
vaesenc %xmm15,%xmm11,%xmm11
vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5
vmovdqu 80+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vpxor %xmm1,%xmm4,%xmm4
vmovdqu 64-32(%r9),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vmovups 64-128(%rcx),%xmm15
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
vaesenc %xmm15,%xmm10,%xmm10
movbeq 72(%r14),%r13
vpxor %xmm5,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
movbeq 64(%r14),%r12
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1
vmovdqu 96+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,48+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,56+8(%rsp)
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 96-32(%r9),%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vmovups 80-128(%rcx),%xmm15
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5
vaesenc %xmm15,%xmm10,%xmm10
movbeq 56(%r14),%r13
vpxor %xmm1,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1
vpxor 112+8(%rsp),%xmm8,%xmm8
vaesenc %xmm15,%xmm11,%xmm11
movbeq 48(%r14),%r12
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,64+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,72+8(%rsp)
vpxor %xmm3,%xmm4,%xmm4
vmovdqu 112-32(%r9),%xmm3
vaesenc %xmm15,%xmm14,%xmm14
vmovups 96-128(%rcx),%xmm15
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1
vaesenc %xmm15,%xmm10,%xmm10
movbeq 40(%r14),%r13
vpxor %xmm2,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
movbeq 32(%r14),%r12
vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,80+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,88+8(%rsp)
vpxor %xmm5,%xmm6,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor %xmm1,%xmm6,%xmm6
vmovups 112-128(%rcx),%xmm15
vpslldq $8,%xmm6,%xmm5
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 16(%r11),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm8,%xmm7,%xmm7
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm5,%xmm4,%xmm4
movbeq 24(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 16(%r14),%r12
vpalignr $8,%xmm4,%xmm4,%xmm0
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
movq %r13,96+8(%rsp)
vaesenc %xmm15,%xmm12,%xmm12
movq %r12,104+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
vmovups 128-128(%rcx),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vmovups 144-128(%rcx),%xmm15
vaesenc %xmm1,%xmm10,%xmm10
vpsrldq $8,%xmm6,%xmm6
vaesenc %xmm1,%xmm11,%xmm11
vpxor %xmm6,%xmm7,%xmm7
vaesenc %xmm1,%xmm12,%xmm12
vpxor %xmm0,%xmm4,%xmm4
movbeq 8(%r14),%r13
vaesenc %xmm1,%xmm13,%xmm13
movbeq 0(%r14),%r12
vaesenc %xmm1,%xmm14,%xmm14
vmovups 160-128(%rcx),%xmm1
cmpl $11,%r10d
jb .Lenc_tail
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 176-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 192-128(%rcx),%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 208-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 224-128(%rcx),%xmm1
jmp .Lenc_tail
.align 32
.Lhandle_ctr32:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vmovdqu 0-32(%r9),%xmm3
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm15,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm15,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpshufb %xmm0,%xmm14,%xmm14
vpshufb %xmm0,%xmm1,%xmm1
jmp .Lresume_ctr32
.align 32
.Lenc_tail:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
vpalignr $8,%xmm4,%xmm4,%xmm8
vaesenc %xmm15,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
vpxor 0(%rdi),%xmm1,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
vpxor 16(%rdi),%xmm1,%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vpxor 32(%rdi),%xmm1,%xmm5
vaesenc %xmm15,%xmm13,%xmm13
vpxor 48(%rdi),%xmm1,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor 64(%rdi),%xmm1,%xmm7
vpxor 80(%rdi),%xmm1,%xmm3
vmovdqu (%r8),%xmm1
vaesenclast %xmm2,%xmm9,%xmm9
vmovdqu 32(%r11),%xmm2
vaesenclast %xmm0,%xmm10,%xmm10
vpaddb %xmm2,%xmm1,%xmm0
movq %r13,112+8(%rsp)
leaq 96(%rdi),%rdi
prefetcht0 512(%rdi)
prefetcht0 576(%rdi)
vaesenclast %xmm5,%xmm11,%xmm11
vpaddb %xmm2,%xmm0,%xmm5
movq %r12,120+8(%rsp)
leaq 96(%rsi),%rsi
vmovdqu 0-128(%rcx),%xmm15
vaesenclast %xmm6,%xmm12,%xmm12
vpaddb %xmm2,%xmm5,%xmm6
vaesenclast %xmm7,%xmm13,%xmm13
vpaddb %xmm2,%xmm6,%xmm7
vaesenclast %xmm3,%xmm14,%xmm14
vpaddb %xmm2,%xmm7,%xmm3
addq $0x60,%rax
subq $0x6,%rdx
jc .L6x_done
vmovups %xmm9,-96(%rsi)
vpxor %xmm15,%xmm1,%xmm9
vmovups %xmm10,-80(%rsi)
vmovdqa %xmm0,%xmm10
vmovups %xmm11,-64(%rsi)
vmovdqa %xmm5,%xmm11
vmovups %xmm12,-48(%rsi)
vmovdqa %xmm6,%xmm12
vmovups %xmm13,-32(%rsi)
vmovdqa %xmm7,%xmm13
vmovups %xmm14,-16(%rsi)
vmovdqa %xmm3,%xmm14
vmovdqu 32+8(%rsp),%xmm7
jmp .Loop6x
.L6x_done:
vpxor 16+8(%rsp),%xmm8,%xmm8
vpxor %xmm4,%xmm8,%xmm8
ret
.cfi_endproc
.size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
.globl aesni_gcm_decrypt
.hidden aesni_gcm_decrypt
.type aesni_gcm_decrypt,@function
.align 32
aesni_gcm_decrypt:
.cfi_startproc
_CET_ENDBR
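// Inputs shorter than 0x60 (96) bytes -- one 6-block batch -- are rejected;
// %rax returns the number of bytes actually processed.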
xorq %rax,%rax
cmpq $0x60,%rdx
jb .Lgcm_dec_abort
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
movq %rsp,%rbp
.cfi_def_cfa_register %rbp
pushq %rbx
.cfi_offset %rbx,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
vzeroupper
movq 16(%rbp),%r12
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq .Lbswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
vmovdqu (%r12),%xmm8
andq $-128,%rsp
vmovdqu (%r11),%xmm0
leaq 128(%rcx),%rcx
leaq 32(%r9),%r9
movl 240-128(%rcx),%r10d
vpshufb %xmm0,%xmm8,%xmm8
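// The 0xf80 mask keeps bits 7..11 (the 128-byte-granular offset within a
// 4KB page). If the key schedule and the stack frame land within 768 bytes
// of each other at the same page offset, %rsp is lowered below, apparently
// to keep round-key loads and stack traffic out of the same cache sets.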
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc .Ldec_no_key_aliasing
cmpq $768,%r15
jnc .Ldec_no_key_aliasing
subq %r15,%rsp
.Ldec_no_key_aliasing:
vmovdqu 80(%rdi),%xmm7
movq %rdi,%r14
vmovdqu 64(%rdi),%xmm4
leaq -192(%rdi,%rdx,1),%r15
vmovdqu 48(%rdi),%xmm5
shrq $4,%rdx
xorq %rax,%rax
vmovdqu 32(%rdi),%xmm6
vpshufb %xmm0,%xmm7,%xmm7
vmovdqu 16(%rdi),%xmm2
vpshufb %xmm0,%xmm4,%xmm4
vmovdqu (%rdi),%xmm3
vpshufb %xmm0,%xmm5,%xmm5
vmovdqu %xmm4,48(%rsp)
vpshufb %xmm0,%xmm6,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm2,%xmm2
vmovdqu %xmm6,80(%rsp)
vpshufb %xmm0,%xmm3,%xmm3
vmovdqu %xmm2,96(%rsp)
vmovdqu %xmm3,112(%rsp)
call _aesni_ctr32_ghash_6x
movq 16(%rbp),%r12
vmovups %xmm9,-96(%rsi)
vmovups %xmm10,-80(%rsi)
vmovups %xmm11,-64(%rsi)
vmovups %xmm12,-48(%rsi)
vmovups %xmm13,-32(%rsi)
vmovups %xmm14,-16(%rsi)
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
.cfi_def_cfa %rsp, 0x38
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
.Lgcm_dec_abort:
ret
.cfi_endproc
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
.type _aesni_ctr32_6x,@function
.align 32
_aesni_ctr32_6x:
.cfi_startproc
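// Primes the encrypt path: encrypts exactly six counter blocks and XORs
// them with 96 bytes of input, with no GHASH work; the caller then stages
// the resulting ciphertext for GHASH once enough exists to fill the 6x
// pipeline.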
vmovdqu 0-128(%rcx),%xmm4
vmovdqu 32(%r11),%xmm2
leaq -1(%r10),%r13
vmovups 16-128(%rcx),%xmm15
leaq 32-128(%rcx),%r12
vpxor %xmm4,%xmm1,%xmm9
addl $100663296,%ebx
jc .Lhandle_ctr32_2
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddb %xmm2,%xmm11,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddb %xmm2,%xmm12,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp .Loop_ctr32
.align 16
.Loop_ctr32:
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vmovups (%r12),%xmm15
leaq 16(%r12),%r12
decl %r13d
jnz .Loop_ctr32
vmovdqu (%r12),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor 0(%rdi),%xmm3,%xmm4
vaesenc %xmm15,%xmm10,%xmm10
vpxor 16(%rdi),%xmm3,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
vpxor 32(%rdi),%xmm3,%xmm6
vaesenc %xmm15,%xmm12,%xmm12
vpxor 48(%rdi),%xmm3,%xmm8
vaesenc %xmm15,%xmm13,%xmm13
vpxor 64(%rdi),%xmm3,%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vpxor 80(%rdi),%xmm3,%xmm3
leaq 96(%rdi),%rdi
vaesenclast %xmm4,%xmm9,%xmm9
vaesenclast %xmm5,%xmm10,%xmm10
vaesenclast %xmm6,%xmm11,%xmm11
vaesenclast %xmm8,%xmm12,%xmm12
vaesenclast %xmm2,%xmm13,%xmm13
vaesenclast %xmm3,%xmm14,%xmm14
vmovups %xmm9,0(%rsi)
vmovups %xmm10,16(%rsi)
vmovups %xmm11,32(%rsi)
vmovups %xmm12,48(%rsi)
vmovups %xmm13,64(%rsi)
vmovups %xmm14,80(%rsi)
leaq 96(%rsi),%rsi
ret
.align 32
.Lhandle_ctr32_2:
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpshufb %xmm0,%xmm14,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpshufb %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp .Loop_ctr32
.cfi_endproc
.size _aesni_ctr32_6x,.-_aesni_ctr32_6x
.globl aesni_gcm_encrypt
.hidden aesni_gcm_encrypt
.type aesni_gcm_encrypt,@function
.align 32
aesni_gcm_encrypt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
.hidden BORINGSSL_function_hit
movb $1,BORINGSSL_function_hit+2(%rip)
#endif
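// Encryption requires at least 288 bytes (18 blocks): the two
// _aesni_ctr32_6x calls below produce 192 bytes of ciphertext to prime the
// GHASH pipeline (hence movq $192,%rax), and _aesni_ctr32_ghash_6x needs at
// least one more 96-byte batch to run.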
xorq %rax,%rax
cmpq $288,%rdx
jb .Lgcm_enc_abort
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
movq %rsp,%rbp
.cfi_def_cfa_register %rbp
pushq %rbx
.cfi_offset %rbx,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
vzeroupper
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq .Lbswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
leaq 128(%rcx),%rcx
vmovdqu (%r11),%xmm0
andq $-128,%rsp
movl 240-128(%rcx),%r10d
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc .Lenc_no_key_aliasing
cmpq $768,%r15
jnc .Lenc_no_key_aliasing
subq %r15,%rsp
.Lenc_no_key_aliasing:
movq %rsi,%r14
leaq -192(%rsi,%rdx,1),%r15
shrq $4,%rdx
call _aesni_ctr32_6x
vpshufb %xmm0,%xmm9,%xmm8
vpshufb %xmm0,%xmm10,%xmm2
vmovdqu %xmm8,112(%rsp)
vpshufb %xmm0,%xmm11,%xmm4
vmovdqu %xmm2,96(%rsp)
vpshufb %xmm0,%xmm12,%xmm5
vmovdqu %xmm4,80(%rsp)
vpshufb %xmm0,%xmm13,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm14,%xmm7
vmovdqu %xmm6,48(%rsp)
call _aesni_ctr32_6x
movq 16(%rbp),%r12
leaq 32(%r9),%r9
vmovdqu (%r12),%xmm8
subq $12,%rdx
movq $192,%rax
vpshufb %xmm0,%xmm8,%xmm8
call _aesni_ctr32_ghash_6x
vmovdqu 32(%rsp),%xmm7
vmovdqu (%r11),%xmm0
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm7,%xmm7,%xmm1
vmovdqu 32-32(%r9),%xmm15
vmovups %xmm9,-96(%rsi)
vpshufb %xmm0,%xmm9,%xmm9
vpxor %xmm7,%xmm1,%xmm1
vmovups %xmm10,-80(%rsi)
vpshufb %xmm0,%xmm10,%xmm10
vmovups %xmm11,-64(%rsi)
vpshufb %xmm0,%xmm11,%xmm11
vmovups %xmm12,-48(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vmovups %xmm13,-32(%rsi)
vpshufb %xmm0,%xmm13,%xmm13
vmovups %xmm14,-16(%rsi)
vpshufb %xmm0,%xmm14,%xmm14
vmovdqu %xmm9,16(%rsp)
vmovdqu 48(%rsp),%xmm6
vmovdqu 16-32(%r9),%xmm0
vpunpckhqdq %xmm6,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5
vpxor %xmm6,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vmovdqu 64(%rsp),%xmm9
vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm9,%xmm9,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6
vpxor %xmm9,%xmm5,%xmm5
vpxor %xmm7,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vmovdqu 80(%rsp),%xmm1
vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm4,%xmm7,%xmm7
vpunpckhqdq %xmm1,%xmm1,%xmm4
vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpxor %xmm6,%xmm9,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 96(%rsp),%xmm2
vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm7,%xmm6,%xmm6
vpunpckhqdq %xmm2,%xmm2,%xmm7
vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpxor %xmm9,%xmm1,%xmm1
vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm5,%xmm4,%xmm4
vpxor 112(%rsp),%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5
vmovdqu 112-32(%r9),%xmm0
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7
vpxor %xmm4,%xmm7,%xmm4
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm1
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8
vpxor %xmm14,%xmm1,%xmm1
vpxor %xmm5,%xmm6,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9
vmovdqu 32-32(%r9),%xmm15
vpxor %xmm2,%xmm8,%xmm7
vpxor %xmm4,%xmm9,%xmm6
vmovdqu 16-32(%r9),%xmm0
vpxor %xmm5,%xmm7,%xmm9
vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4
vpxor %xmm9,%xmm6,%xmm6
vpunpckhqdq %xmm13,%xmm13,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14
vpxor %xmm13,%xmm2,%xmm2
vpslldq $8,%xmm6,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vpxor %xmm9,%xmm5,%xmm8
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm6,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm12,%xmm12,%xmm9
vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13
vpxor %xmm12,%xmm9,%xmm9
vpxor %xmm14,%xmm13,%xmm13
vpalignr $8,%xmm8,%xmm8,%xmm14
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm11,%xmm11,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12
vpxor %xmm11,%xmm1,%xmm1
vpxor %xmm13,%xmm12,%xmm12
vxorps 16(%rsp),%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm9,%xmm9
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm10,%xmm10,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11
vpxor %xmm10,%xmm2,%xmm2
vpalignr $8,%xmm8,%xmm8,%xmm14
vpxor %xmm12,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm9,%xmm1,%xmm1
vxorps %xmm7,%xmm14,%xmm14
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4
vmovdqu 112-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm11,%xmm10,%xmm10
vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7
vpxor %xmm4,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6
vpxor %xmm10,%xmm7,%xmm7
vpxor %xmm2,%xmm6,%xmm6
vpxor %xmm5,%xmm7,%xmm4
vpxor %xmm4,%xmm6,%xmm6
vpslldq $8,%xmm6,%xmm1
vmovdqu 16(%r11),%xmm3
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm1,%xmm5,%xmm8
vpxor %xmm6,%xmm7,%xmm7
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm2,%xmm8,%xmm8
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm7,%xmm2,%xmm2
vpxor %xmm2,%xmm8,%xmm8
movq 16(%rbp),%r12
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
.cfi_def_cfa %rsp, 0x38
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
.Lgcm_enc_abort:
ret
.cfi_endproc
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt
.section .rodata
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
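// .Lpoly carries 0xc2 in its top byte: the reduction constant for the GHASH
// polynomial x^128 + x^7 + x^2 + x + 1 in the bit-reflected representation
// used by the vpclmulqdq-based reduction above.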
.Lone_msb:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
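// The .byte run above is the module's ASCII identification string:
// "AES-NI GCM module for x86_64, CRYPTOGAMS by <appro@openssl.org>".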
.align 64
.text
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesv8-gcm-armv8-linux64.S
mktmansour/MKT-KSA-Geolocation-Security | 82,352
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if __ARM_MAX_ARCH__ >= 8
.arch armv8-a+crypto
.text
.globl aes_gcm_enc_kernel
.hidden aes_gcm_enc_kernel
.type aes_gcm_enc_kernel,%function
.align 4
aes_gcm_enc_kernel:
AARCH64_SIGN_LINK_REGISTER
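// From the loads below, x0/x2 appear to be the in/out pointers, x1 the
// length in bits (lsr #3 converts to bytes), x3 the GHASH state Xi, x4 the
// counter block, x5 the key schedule (round count read via x8 at offset
// 240) and x6 the table of H powers. Four AES blocks are interleaved per
// main-loop iteration, with AES-128/192/256 selected by comparing the round
// count in w17 against 12.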
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
add x4, x0, x1, lsr #3 // end_input_ptr
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
sub x5, x5, #1 // byte_len - 1
ldr q18, [x8, #0] // load rk0
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
ldr q25, [x8, #112] // load rk7
add x5, x5, x0
lsr x12, x11, #32
fmov d2, x10 // CTR block 2
orr w11, w11, w11
rev w12, w12 // rev_ctr32
fmov d1, x10 // CTR block 1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
add w12, w12, #1 // increment rev_ctr32
rev w9, w12 // CTR block 1
fmov d3, x10 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 1
add w12, w12, #1 // CTR block 1
ldr q19, [x8, #16] // load rk1
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
ldr q20, [x8, #32] // load rk2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
orr x9, x11, x9, lsl #32 // CTR block 3
fmov v3.d[1], x9 // CTR block 3
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q21, [x8, #48] // load rk3
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q24, [x8, #96] // load rk6
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q23, [x8, #80] // load rk5
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q22, [x8, #64] // load rk4
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
ldr q29, [x8, #176] // load rk11
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
ldr q26, [x8, #128] // load rk8
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
add w12, w12, #1 // CTR block 3
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
ldr q27, [x8, #144] // load rk9
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
ldr q28, [x8, #160] // load rk10
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
b.lt .Lenc_finish_first_blocks // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
b.eq .Lenc_finish_first_blocks // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
.Lenc_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v2.16b, v31.16b // AES block 2 - round N-1
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
aese v3.16b, v31.16b // AES block 3 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
b.ge .Lenc_tail // handle tail
ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
rev w9, w12 // CTR block 4
ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
add x0, x0, #64 // AES input_ptr update
eor x19, x19, x13 // AES block 1 - round N low
eor x20, x20, x14 // AES block 1 - round N high
fmov d5, x19 // AES block 1 - mov low
eor x6, x6, x13 // AES block 0 - round N low
eor x7, x7, x14 // AES block 0 - round N high
eor x24, x24, x14 // AES block 3 - round N high
fmov d4, x6 // AES block 0 - mov low
cmp x0, x5 // check if we have <= 8 blocks
fmov v4.d[1], x7 // AES block 0 - mov high
eor x23, x23, x13 // AES block 3 - round N low
eor x21, x21, x13 // AES block 2 - round N low
fmov v5.d[1], x20 // AES block 1 - mov high
fmov d6, x21 // AES block 2 - mov low
add w12, w12, #1 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov d7, x23 // AES block 3 - mov low
eor x22, x22, x14 // AES block 2 - round N high
fmov v6.d[1], x22 // AES block 2 - mov high
eor v4.16b, v4.16b, v0.16b // AES block 0 - result
fmov d0, x10 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
eor v5.16b, v5.16b, v1.16b // AES block 1 - result
fmov d1, x10 // CTR block 5
orr x9, x11, x9, lsl #32 // CTR block 5
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
st1 { v4.16b}, [x2], #16 // AES block 0 - store result
fmov v7.d[1], x24 // AES block 3 - mov high
orr x9, x11, x9, lsl #32 // CTR block 6
eor v6.16b, v6.16b, v2.16b // AES block 2 - result
st1 { v5.16b}, [x2], #16 // AES block 1 - store result
add w12, w12, #1 // CTR block 6
fmov d2, x10 // CTR block 6
fmov v2.d[1], x9 // CTR block 6
st1 { v6.16b}, [x2], #16 // AES block 2 - store result
rev w9, w12 // CTR block 7
orr x9, x11, x9, lsl #32 // CTR block 7
eor v7.16b, v7.16b, v3.16b // AES block 3 - result
st1 { v7.16b}, [x2], #16 // AES block 3 - store result
b.ge .Lenc_prepretail // do prepretail
.Lenc_main_loop: // main loop start
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d3, x10 // CTR block 4k+3
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
fmov v3.d[1], x9 // CTR block 4k+3
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
eor v4.16b, v4.16b, v11.16b // PRE 1
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x23, x23, x13 // AES block 4k+7 - round N low
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d10, v17.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
eor x22, x22, x14 // AES block 4k+6 - round N high
mov d8, v4.d[1] // GHASH block 4k - mid
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor x19, x19, x13 // AES block 4k+5 - round N low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
eor x21, x21, x13 // AES block 4k+6 - round N low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
movi v8.8b, #0xc2
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
fmov d5, x19 // AES block 4k+5 - mov low
ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
b.lt .Lenc_main_loop_continue // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq .Lenc_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
.Lenc_main_loop_continue:
shl d8, d8, #56 // mod_constant
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
add w12, w12, #1 // CTR block 4k+3
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
add x0, x0, #64 // AES input_ptr update
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
rev w9, w12 // CTR block 4k+8
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor x6, x6, x13 // AES block 4k+4 - round N low
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
eor x7, x7, x14 // AES block 4k+4 - round N high
fmov d4, x6 // AES block 4k+4 - mov low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
eor x20, x20, x14 // AES block 4k+5 - round N high
eor x24, x24, x14 // AES block 4k+7 - round N high
add w12, w12, #1 // CTR block 4k+8
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
fmov d7, x23 // AES block 4k+7 - mov low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
fmov v5.d[1], x20 // AES block 4k+5 - mov high
fmov d6, x21 // AES block 4k+6 - mov low
cmp x0, x5 // .LOOP CONTROL
fmov v6.d[1], x22 // AES block 4k+6 - mov high
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
rev w9, w12 // CTR block 4k+9
add w12, w12, #1 // CTR block 4k+9
eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
fmov d1, x10 // CTR block 4k+9
orr x9, x11, x9, lsl #32 // CTR block 4k+9
fmov v1.d[1], x9 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
rev w9, w12 // CTR block 4k+10
st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
orr x9, x11, x9, lsl #32 // CTR block 4k+10
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
fmov v7.d[1], x24 // AES block 4k+7 - mov high
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
fmov d2, x10 // CTR block 4k+10
st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
fmov v2.d[1], x9 // CTR block 4k+10
rev w9, w12 // CTR block 4k+11
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
orr x9, x11, x9, lsl #32 // CTR block 4k+11
eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
b.lt .Lenc_main_loop
.Lenc_prepretail: // PREPRETAIL
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov d3, x10 // CTR block 4k+3
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
fmov v3.d[1], x9 // CTR block 4k+3
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
add w12, w12, #1 // CTR block 4k+3
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
cmp x17, #12 // setup flags for AES-128/192/256 check
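// x17 holds the round count (10, 12 or 14), so at the branches below
// lt selects AES-128, eq selects AES-192 and gt falls through to AES-256.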
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
pmull v4.1q, v9.1d, v8.1d
ext v9.16b, v9.16b, v9.16b, #8
eor v10.16b, v10.16b, v11.16b
b.lt .Lenc_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
b.eq .Lenc_finish_prepretail // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
.Lenc_finish_prepretail:
eor v10.16b, v10.16b, v4.16b
eor v10.16b, v10.16b, v9.16b
pmull v4.1q, v10.1d, v8.1d
ext v10.16b, v10.16b, v10.16b, #8
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
eor v11.16b, v11.16b, v4.16b
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b
.Lenc_tail: // TAIL
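// Between one and four blocks remain, the last possibly partial.
// Dispatch on the remaining byte count in x5; every keystream block
// left unused decrements w12 so that the counter stored at the end
// reflects only the blocks actually consumed.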
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
cmp x5, #48
fmov d4, x6 // AES block 4k+4 - mov low
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
b.gt .Lenc_blocks_more_than_3
cmp x5, #32
mov v3.16b, v2.16b
movi v11.8b, #0
movi v9.8b, #0
sub w12, w12, #1
mov v2.16b, v1.16b
movi v10.8b, #0
b.gt .Lenc_blocks_more_than_2
mov v3.16b, v1.16b
sub w12, w12, #1
cmp x5, #16
b.gt .Lenc_blocks_more_than_1
sub w12, w12, #1
b .Lenc_blocks_less_than_1
.Lenc_blocks_more_than_3: // blocks left > 3
st1 { v5.16b}, [x2], #16 // AES final-3 block - store result
ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-3 block
eor x6, x6, x13 // AES final-2 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor x7, x7, x14 // AES final-2 block - round N high
mov d22, v4.d[1] // GHASH final-3 block - mid
fmov d5, x6 // AES final-2 block - mov low
fmov v5.d[1], x7 // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
mov d10, v17.d[1] // GHASH final-3 block - mid
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
.Lenc_blocks_more_than_2: // blocks left > 2
st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-2 block
eor x6, x6, x13 // AES final-1 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
fmov d5, x6 // AES final-1 block - mov low
eor x7, x7, x14 // AES final-1 block - round N high
fmov v5.d[1], x7 // AES final-1 block - mov high
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
.Lenc_blocks_more_than_1: // blocks left > 1
st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ldp x6, x7, [x0], #16 // AES final block - load input low & high
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
eor x6, x6, x13 // AES final block - round N low
mov d22, v4.d[1] // GHASH final-1 block - mid
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor x7, x7, x14 // AES final block - round N high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
fmov d5, x6 // AES final block - mov low
fmov v5.d[1], x7 // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
eor v5.16b, v5.16b, v3.16b // AES final block - result
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
.Lenc_blocks_less_than_1: // blocks left <= 1
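// Build byte masks for the valid part of the final block. x1 becomes
// 128 - (#bits in the last block); since register shift amounts are
// taken mod 64 on AArch64, the single lsr below yields the high-half
// mask when more than 64 bits remain and the low-half mask otherwise,
// and the csel pair routes it to the correct half.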
and x1, x1, #127 // bit_length %= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored
mvn x14, xzr // rkN_h = 0xffffffffffffffff
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x6, x13, x14, lt
csel x7, x14, xzr, lt
fmov d0, x6 // ctr0b is mask for last block
fmov v0.d[1], x7
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
mov d8, v4.d[1] // GHASH final block - mid
rev w9, w12
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
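// MODULO reduction: v9:v10:v11 hold the high, middle and low 64-bit
// parts of the Karatsuba product. 0xc2 shifted into the top byte is
// the reduction constant for the bit-reflected GHASH polynomial
// x^128 + x^7 + x^2 + x + 1; two pmull folds with it bring the
// 256-bit product back to 128 bits.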
movi v8.8b, #0xc2
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
str w9, [x16, #12] // store the updated counter
st1 { v5.16b}, [x2] // store all 16B
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size aes_gcm_enc_kernel,.-aes_gcm_enc_kernel
.globl aes_gcm_dec_kernel
.hidden aes_gcm_dec_kernel
.type aes_gcm_dec_kernel,%function
.align 4
aes_gcm_dec_kernel:
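// Register mapping, inferred from the loads below: x0 = input, x1 = bit
// length, x2 = output, x3 = current GHASH state (Xi), x4 = counter
// block, x5 = AES round keys, x6 = GHASH key powers H1..H4.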
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
orr w11, w11, w11
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
ld1 { v0.16b}, [x16] // special case: load the initial counter as a vector so the first AES block can start as early as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
ext v14.16b, v14.16b, v14.16b, #8
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
ext v15.16b, v15.16b, v15.16b, #8
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
ext v13.16b, v13.16b, v13.16b, #8
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
ext v12.16b, v12.16b, v12.16b, #8
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt .Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq .Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
.Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
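// The trn1/trn2 pairs above interleave the GHASH key powers into
// high-half and low-half vectors; the two eors precompute the XOR of
// halves (hNk) used as the Karatsuba middle-term multipliers.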
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge .Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge .Ldec_prepretail // do prepretail
.Ldec_main_loop: // main loop start
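// In decryption GHASH consumes the raw ciphertext blocks just loaded
// (v4..v7), while the keystream XOR yields plaintext words that are
// stored with stp below.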
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt .Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq .Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
.Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // .LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt .Ldec_main_loop
.Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt .Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq .Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
.Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
.Ldec_tail: // TAIL
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt .Ldec_blocks_more_than_3
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt .Ldec_blocks_more_than_2
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt .Ldec_blocks_more_than_1
sub w12, w12, #1
b .Ldec_blocks_less_than_1
.Ldec_blocks_more_than_3: // blocks left > 3
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
.Ldec_blocks_more_than_2: // blocks left > 2
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
.Ldec_blocks_more_than_1: // blocks left > 1
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
.Ldec_blocks_less_than_1: // blocks left <= 1
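// As in the encrypt tail, build byte masks for the valid part of the
// final block. Here the plaintext words in x6/x7 are masked and merged
// (bic/orr) with the bytes already present at the output, while the
// masked ciphertext in v5 feeds the final GHASH.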
and x1, x1, #127 // bit_length %= 128
mvn x14, xzr // rkN_h = 0xffffffffffffffff
sub x1, x1, #128 // bit_length -= 128
mvn x13, xzr // rkN_l = 0xffffffffffffffff
ldp x4, x5, [x2] // load existing bytes that must not be overwritten
neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 // bit_length %= 128
lsr x14, x14, x1 // rkN_h is mask for top 64b of last block
cmp x1, #64
csel x9, x13, x14, lt
csel x10, x14, xzr, lt
fmov d0, x9 // ctr0b is mask for last block
and x6, x6, x9
mov v0.d[1], x10
bic x4, x4, x9 // mask out low existing bytes
rev w9, w12
bic x5, x5, x10 // mask out high existing bytes
orr x6, x6, x4
and x7, x7, x10
orr x7, x7, x5
and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size aes_gcm_dec_kernel,.-aes_gcm_dec_kernel
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
// ----------------------------------------------------------------------
// mktmansour/MKT-KSA-Geolocation-Security
// .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/armv8-mont-ios64.S
// ----------------------------------------------------------------------
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.text
.globl _bn_mul_mont_nohw
.private_extern _bn_mul_mont_nohw
.align 5
_bn_mul_mont_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
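// Word-serial Montgomery multiplication: for each b[i] the loop below
// accumulates a[]*b[i] into t[], then adds m1*n[] with m1 = t[0]*n0
// chosen so the low limb cancels, letting t[] shift down one limb per
// outer iteration.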
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) As for the removal of the first multiplication and addition
// instructions: the outcome of the first addition is guaranteed
// to be zero, which leaves two computationally significant
// outcomes: it either carries or it does not. So when does it
// carry? Is there an alternative way to deduce it? If you follow
// the operations, you can observe that the condition for carry is
// quite simple: x6 being non-zero. So the carry can be calculated
// by adding -1 to x6. That is what the next instruction does.
subs xzr,x6,#1 // (*)
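// (*) Sanity check of the trick: n0 is -np[0]^-1 mod 2^64, so
// lo(np[0]*m1) == 2^64 - x6 (mod 2^64) and the discarded addition
// carries exactly when x6 != 0; subs xzr,x6,#1 sets C under the
// same condition (x6 >= 1), so the flags agree.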
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step. We check whether the result is larger than the
// modulus and, if it is, subtract the modulus. But comparison
// implies subtraction, so we subtract the modulus, check whether
// it borrowed, and conditionally copy the original value.
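// In pseudocode: rp[] = tp[] - np[]; if that subtraction borrowed,
// rp[j] = tp[j] instead, selected branch-free with csel in
// Lcond_copy below while tp[] is wiped.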
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _bn_sqr8x_mont
.private_extern _bn_sqr8x_mont
.align 5
_bn_sqr8x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
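// The schedule above covers every cross product a[i]*a[j] with i > j
// for the 8-limb window; the diagonal a[i]*a[i] terms are added later,
// in Lsqr8x_outer_break, after the cross products are doubled.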
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewinded ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
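// Each pass of Lsqr8x_reduction retires one 64-bit limb: x28 holds
// t[0]*n0, so adding n[0..7]*x28 makes the bottom limb vanish; eight
// passes reduce the current 512-bit window.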
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewinded np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
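// In C-like pseudocode (illustrative): borrow = (t < n);
//   r[i] = borrow ? t[i] : t[i] - n[i];
// realized branch-free with csel in Lsqr4x_cond_copy below.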
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-x26,x28 hold result, x6-x13 hold modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-x13 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _bn_mul4x_mont
.private_extern _bn_mul4x_mont
.align 5
_bn_mul4x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
cbz x10,Lmul4x4_post_condition
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
sub x11,x27,x5 // rewound x1
cbz x10,Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0)
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
sub x11,x3,x5 // rewound np?
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-x22,x0 hold result, x14-x17 hold modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-x9 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
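// (the .byte string above reads "Montgomery Multiplication for ARMv8, CRYPTOGAMS by <appro@openssl.org>")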
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.section __DATA,__const
.p2align 6
L$zero:
.long 0,0,0,0
L$one:
.long 1,0,0,0
L$inc:
.long 0,1,2,3
L$four:
.long 4,4,4,4
L$incy:
.long 0,2,4,6,1,3,5,7
L$eight:
.long 8,8,8,8,8,8,8,8
L$rot16:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
L$rot24:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
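// L$rot16 and L$rot24 are pshufb masks: byte order [2,3,0,1] rotates each
// 32-bit lane left by 16 bits, and [3,0,1,2] left by 8 (i.e. right by 24,
// hence the name). The 12- and 7-bit rotations are not byte-aligned and are
// done with shift/shift/or instead.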
L$sigma:
.byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0
.p2align 6
L$zeroz:
.long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
L$fourz:
.long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
L$incz:
.long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
L$sixteen:
.long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
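// (the .byte string above reads "ChaCha20 for x86_64, CRYPTOGAMS by <appro@openssl.org>")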
.text
.globl _ChaCha20_ctr32_nohw
.private_extern _ChaCha20_ctr32_nohw
.p2align 6
_ChaCha20_ctr32_nohw:
_CET_ENDBR
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $64+24,%rsp
L$ctr32_body:
movdqu (%rcx),%xmm1
movdqu 16(%rcx),%xmm2
movdqu (%r8),%xmm3
movdqa L$one(%rip),%xmm4
movdqa %xmm1,16(%rsp)
movdqa %xmm2,32(%rsp)
movdqa %xmm3,48(%rsp)
movq %rdx,%rbp
jmp L$oop_outer
.p2align 5
L$oop_outer:
movl $0x61707865,%eax
movl $0x3320646e,%ebx
movl $0x79622d32,%ecx
movl $0x6b206574,%edx
movl 16(%rsp),%r8d
movl 20(%rsp),%r9d
movl 24(%rsp),%r10d
movl 28(%rsp),%r11d
movd %xmm3,%r12d
movl 52(%rsp),%r13d
movl 56(%rsp),%r14d
movl 60(%rsp),%r15d
movq %rbp,64+0(%rsp)
movl $10,%ebp
movq %rsi,64+8(%rsp)
.byte 102,72,15,126,214 // movq %xmm2,%rsi
movq %rdi,64+16(%rsp)
movq %rsi,%rdi
shrq $32,%rdi
jmp L$oop
.p2align 5
L$oop:
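// Each pass is one ChaCha double round: 4 column then 4 diagonal quarter
// rounds, interleaved two at a time. For reference, one quarter round in
// C-like pseudocode (illustrative):
//   a += b; d ^= a; d = ROTL32(d,16);
//   c += d; b ^= c; b = ROTL32(b,12);
//   a += b; d ^= a; d = ROTL32(d, 8);
//   c += d; b ^= c; b = ROTL32(b, 7);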
addl %r8d,%eax
xorl %eax,%r12d
roll $16,%r12d
addl %r9d,%ebx
xorl %ebx,%r13d
roll $16,%r13d
addl %r12d,%esi
xorl %esi,%r8d
roll $12,%r8d
addl %r13d,%edi
xorl %edi,%r9d
roll $12,%r9d
addl %r8d,%eax
xorl %eax,%r12d
roll $8,%r12d
addl %r9d,%ebx
xorl %ebx,%r13d
roll $8,%r13d
addl %r12d,%esi
xorl %esi,%r8d
roll $7,%r8d
addl %r13d,%edi
xorl %edi,%r9d
roll $7,%r9d
movl %esi,32(%rsp)
movl %edi,36(%rsp)
movl 40(%rsp),%esi
movl 44(%rsp),%edi
addl %r10d,%ecx
xorl %ecx,%r14d
roll $16,%r14d
addl %r11d,%edx
xorl %edx,%r15d
roll $16,%r15d
addl %r14d,%esi
xorl %esi,%r10d
roll $12,%r10d
addl %r15d,%edi
xorl %edi,%r11d
roll $12,%r11d
addl %r10d,%ecx
xorl %ecx,%r14d
roll $8,%r14d
addl %r11d,%edx
xorl %edx,%r15d
roll $8,%r15d
addl %r14d,%esi
xorl %esi,%r10d
roll $7,%r10d
addl %r15d,%edi
xorl %edi,%r11d
roll $7,%r11d
addl %r9d,%eax
xorl %eax,%r15d
roll $16,%r15d
addl %r10d,%ebx
xorl %ebx,%r12d
roll $16,%r12d
addl %r15d,%esi
xorl %esi,%r9d
roll $12,%r9d
addl %r12d,%edi
xorl %edi,%r10d
roll $12,%r10d
addl %r9d,%eax
xorl %eax,%r15d
roll $8,%r15d
addl %r10d,%ebx
xorl %ebx,%r12d
roll $8,%r12d
addl %r15d,%esi
xorl %esi,%r9d
roll $7,%r9d
addl %r12d,%edi
xorl %edi,%r10d
roll $7,%r10d
movl %esi,40(%rsp)
movl %edi,44(%rsp)
movl 32(%rsp),%esi
movl 36(%rsp),%edi
addl %r11d,%ecx
xorl %ecx,%r13d
roll $16,%r13d
addl %r8d,%edx
xorl %edx,%r14d
roll $16,%r14d
addl %r13d,%esi
xorl %esi,%r11d
roll $12,%r11d
addl %r14d,%edi
xorl %edi,%r8d
roll $12,%r8d
addl %r11d,%ecx
xorl %ecx,%r13d
roll $8,%r13d
addl %r8d,%edx
xorl %edx,%r14d
roll $8,%r14d
addl %r13d,%esi
xorl %esi,%r11d
roll $7,%r11d
addl %r14d,%edi
xorl %edi,%r8d
roll $7,%r8d
decl %ebp
jnz L$oop
movl %edi,36(%rsp)
movl %esi,32(%rsp)
movq 64(%rsp),%rbp
movdqa %xmm2,%xmm1
movq 64+8(%rsp),%rsi
paddd %xmm4,%xmm3
movq 64+16(%rsp),%rdi
addl $0x61707865,%eax
addl $0x3320646e,%ebx
addl $0x79622d32,%ecx
addl $0x6b206574,%edx
addl 16(%rsp),%r8d
addl 20(%rsp),%r9d
addl 24(%rsp),%r10d
addl 28(%rsp),%r11d
addl 48(%rsp),%r12d
addl 52(%rsp),%r13d
addl 56(%rsp),%r14d
addl 60(%rsp),%r15d
paddd 32(%rsp),%xmm1
cmpq $64,%rbp
jb L$tail
xorl 0(%rsi),%eax
xorl 4(%rsi),%ebx
xorl 8(%rsi),%ecx
xorl 12(%rsi),%edx
xorl 16(%rsi),%r8d
xorl 20(%rsi),%r9d
xorl 24(%rsi),%r10d
xorl 28(%rsi),%r11d
movdqu 32(%rsi),%xmm0
xorl 48(%rsi),%r12d
xorl 52(%rsi),%r13d
xorl 56(%rsi),%r14d
xorl 60(%rsi),%r15d
leaq 64(%rsi),%rsi
pxor %xmm1,%xmm0
movdqa %xmm2,32(%rsp)
movd %xmm3,48(%rsp)
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
movdqu %xmm0,32(%rdi)
movl %r12d,48(%rdi)
movl %r13d,52(%rdi)
movl %r14d,56(%rdi)
movl %r15d,60(%rdi)
leaq 64(%rdi),%rdi
subq $64,%rbp
jnz L$oop_outer
jmp L$done
.p2align 4
L$tail:
movl %eax,0(%rsp)
movl %ebx,4(%rsp)
xorq %rbx,%rbx
movl %ecx,8(%rsp)
movl %edx,12(%rsp)
movl %r8d,16(%rsp)
movl %r9d,20(%rsp)
movl %r10d,24(%rsp)
movl %r11d,28(%rsp)
movdqa %xmm1,32(%rsp)
movl %r12d,48(%rsp)
movl %r13d,52(%rsp)
movl %r14d,56(%rsp)
movl %r15d,60(%rsp)
L$oop_tail:
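// Partial final block: the keystream block was spilled to the stack above
// and is XORed into the input one byte at a time.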
movzbl (%rsi,%rbx,1),%eax
movzbl (%rsp,%rbx,1),%edx
leaq 1(%rbx),%rbx
xorl %edx,%eax
movb %al,-1(%rdi,%rbx,1)
decq %rbp
jnz L$oop_tail
L$done:
leaq 64+24+48(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$no_data:
ret
.globl _ChaCha20_ctr32_ssse3_4x
.private_extern _ChaCha20_ctr32_ssse3_4x
.p2align 5
_ChaCha20_ctr32_ssse3_4x:
_CET_ENDBR
movq %rsp,%r9
subq $0x140+8,%rsp
movdqa L$sigma(%rip),%xmm11
movdqu (%rcx),%xmm15
movdqu 16(%rcx),%xmm7
movdqu (%r8),%xmm3
leaq 256(%rsp),%rcx
leaq L$rot16(%rip),%r10
leaq L$rot24(%rip),%r11
pshufd $0x00,%xmm11,%xmm8
pshufd $0x55,%xmm11,%xmm9
movdqa %xmm8,64(%rsp)
pshufd $0xaa,%xmm11,%xmm10
movdqa %xmm9,80(%rsp)
pshufd $0xff,%xmm11,%xmm11
movdqa %xmm10,96(%rsp)
movdqa %xmm11,112(%rsp)
pshufd $0x00,%xmm15,%xmm12
pshufd $0x55,%xmm15,%xmm13
movdqa %xmm12,128-256(%rcx)
pshufd $0xaa,%xmm15,%xmm14
movdqa %xmm13,144-256(%rcx)
pshufd $0xff,%xmm15,%xmm15
movdqa %xmm14,160-256(%rcx)
movdqa %xmm15,176-256(%rcx)
pshufd $0x00,%xmm7,%xmm4
pshufd $0x55,%xmm7,%xmm5
movdqa %xmm4,192-256(%rcx)
pshufd $0xaa,%xmm7,%xmm6
movdqa %xmm5,208-256(%rcx)
pshufd $0xff,%xmm7,%xmm7
movdqa %xmm6,224-256(%rcx)
movdqa %xmm7,240-256(%rcx)
pshufd $0x00,%xmm3,%xmm0
pshufd $0x55,%xmm3,%xmm1
paddd L$inc(%rip),%xmm0
pshufd $0xaa,%xmm3,%xmm2
movdqa %xmm1,272-256(%rcx)
pshufd $0xff,%xmm3,%xmm3
movdqa %xmm2,288-256(%rcx)
movdqa %xmm3,304-256(%rcx)
jmp L$oop_enter4x
.p2align 5
L$oop_outer4x:
movdqa 64(%rsp),%xmm8
movdqa 80(%rsp),%xmm9
movdqa 96(%rsp),%xmm10
movdqa 112(%rsp),%xmm11
movdqa 128-256(%rcx),%xmm12
movdqa 144-256(%rcx),%xmm13
movdqa 160-256(%rcx),%xmm14
movdqa 176-256(%rcx),%xmm15
movdqa 192-256(%rcx),%xmm4
movdqa 208-256(%rcx),%xmm5
movdqa 224-256(%rcx),%xmm6
movdqa 240-256(%rcx),%xmm7
movdqa 256-256(%rcx),%xmm0
movdqa 272-256(%rcx),%xmm1
movdqa 288-256(%rcx),%xmm2
movdqa 304-256(%rcx),%xmm3
paddd L$four(%rip),%xmm0
L$oop_enter4x:
movdqa %xmm6,32(%rsp)
movdqa %xmm7,48(%rsp)
movdqa (%r10),%xmm7
movl $10,%eax
movdqa %xmm0,256-256(%rcx)
jmp L$oop4x
.p2align 5
L$oop4x:
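// The state is kept transposed: each xmm register holds the same state word
// from 4 independent blocks, so every quarter round processes 4 blocks at
// once. Rotations by 16/8 use pshufb (masks at %r10/%r11); 12/7 use
// pslld/psrld/por.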
paddd %xmm12,%xmm8
paddd %xmm13,%xmm9
pxor %xmm8,%xmm0
pxor %xmm9,%xmm1
.byte 102,15,56,0,199
.byte 102,15,56,0,207
paddd %xmm0,%xmm4
paddd %xmm1,%xmm5
pxor %xmm4,%xmm12
pxor %xmm5,%xmm13
movdqa %xmm12,%xmm6
pslld $12,%xmm12
psrld $20,%xmm6
movdqa %xmm13,%xmm7
pslld $12,%xmm13
por %xmm6,%xmm12
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm13
paddd %xmm12,%xmm8
paddd %xmm13,%xmm9
pxor %xmm8,%xmm0
pxor %xmm9,%xmm1
.byte 102,15,56,0,198
.byte 102,15,56,0,206
paddd %xmm0,%xmm4
paddd %xmm1,%xmm5
pxor %xmm4,%xmm12
pxor %xmm5,%xmm13
movdqa %xmm12,%xmm7
pslld $7,%xmm12
psrld $25,%xmm7
movdqa %xmm13,%xmm6
pslld $7,%xmm13
por %xmm7,%xmm12
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm13
movdqa %xmm4,0(%rsp)
movdqa %xmm5,16(%rsp)
movdqa 32(%rsp),%xmm4
movdqa 48(%rsp),%xmm5
paddd %xmm14,%xmm10
paddd %xmm15,%xmm11
pxor %xmm10,%xmm2
pxor %xmm11,%xmm3
.byte 102,15,56,0,215
.byte 102,15,56,0,223
paddd %xmm2,%xmm4
paddd %xmm3,%xmm5
pxor %xmm4,%xmm14
pxor %xmm5,%xmm15
movdqa %xmm14,%xmm6
pslld $12,%xmm14
psrld $20,%xmm6
movdqa %xmm15,%xmm7
pslld $12,%xmm15
por %xmm6,%xmm14
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm15
paddd %xmm14,%xmm10
paddd %xmm15,%xmm11
pxor %xmm10,%xmm2
pxor %xmm11,%xmm3
.byte 102,15,56,0,214
.byte 102,15,56,0,222
paddd %xmm2,%xmm4
paddd %xmm3,%xmm5
pxor %xmm4,%xmm14
pxor %xmm5,%xmm15
movdqa %xmm14,%xmm7
pslld $7,%xmm14
psrld $25,%xmm7
movdqa %xmm15,%xmm6
pslld $7,%xmm15
por %xmm7,%xmm14
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm15
paddd %xmm13,%xmm8
paddd %xmm14,%xmm9
pxor %xmm8,%xmm3
pxor %xmm9,%xmm0
.byte 102,15,56,0,223
.byte 102,15,56,0,199
paddd %xmm3,%xmm4
paddd %xmm0,%xmm5
pxor %xmm4,%xmm13
pxor %xmm5,%xmm14
movdqa %xmm13,%xmm6
pslld $12,%xmm13
psrld $20,%xmm6
movdqa %xmm14,%xmm7
pslld $12,%xmm14
por %xmm6,%xmm13
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm14
paddd %xmm13,%xmm8
paddd %xmm14,%xmm9
pxor %xmm8,%xmm3
pxor %xmm9,%xmm0
.byte 102,15,56,0,222
.byte 102,15,56,0,198
paddd %xmm3,%xmm4
paddd %xmm0,%xmm5
pxor %xmm4,%xmm13
pxor %xmm5,%xmm14
movdqa %xmm13,%xmm7
pslld $7,%xmm13
psrld $25,%xmm7
movdqa %xmm14,%xmm6
pslld $7,%xmm14
por %xmm7,%xmm13
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm14
movdqa %xmm4,32(%rsp)
movdqa %xmm5,48(%rsp)
movdqa 0(%rsp),%xmm4
movdqa 16(%rsp),%xmm5
paddd %xmm15,%xmm10
paddd %xmm12,%xmm11
pxor %xmm10,%xmm1
pxor %xmm11,%xmm2
.byte 102,15,56,0,207
.byte 102,15,56,0,215
paddd %xmm1,%xmm4
paddd %xmm2,%xmm5
pxor %xmm4,%xmm15
pxor %xmm5,%xmm12
movdqa %xmm15,%xmm6
pslld $12,%xmm15
psrld $20,%xmm6
movdqa %xmm12,%xmm7
pslld $12,%xmm12
por %xmm6,%xmm15
psrld $20,%xmm7
movdqa (%r11),%xmm6
por %xmm7,%xmm12
paddd %xmm15,%xmm10
paddd %xmm12,%xmm11
pxor %xmm10,%xmm1
pxor %xmm11,%xmm2
.byte 102,15,56,0,206
.byte 102,15,56,0,214
paddd %xmm1,%xmm4
paddd %xmm2,%xmm5
pxor %xmm4,%xmm15
pxor %xmm5,%xmm12
movdqa %xmm15,%xmm7
pslld $7,%xmm15
psrld $25,%xmm7
movdqa %xmm12,%xmm6
pslld $7,%xmm12
por %xmm7,%xmm15
psrld $25,%xmm6
movdqa (%r10),%xmm7
por %xmm6,%xmm12
decl %eax
jnz L$oop4x
paddd 64(%rsp),%xmm8
paddd 80(%rsp),%xmm9
paddd 96(%rsp),%xmm10
paddd 112(%rsp),%xmm11
movdqa %xmm8,%xmm6
punpckldq %xmm9,%xmm8
movdqa %xmm10,%xmm7
punpckldq %xmm11,%xmm10
punpckhdq %xmm9,%xmm6
punpckhdq %xmm11,%xmm7
movdqa %xmm8,%xmm9
punpcklqdq %xmm10,%xmm8
movdqa %xmm6,%xmm11
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm10,%xmm9
punpckhqdq %xmm7,%xmm11
paddd 128-256(%rcx),%xmm12
paddd 144-256(%rcx),%xmm13
paddd 160-256(%rcx),%xmm14
paddd 176-256(%rcx),%xmm15
movdqa %xmm8,0(%rsp)
movdqa %xmm9,16(%rsp)
movdqa 32(%rsp),%xmm8
movdqa 48(%rsp),%xmm9
movdqa %xmm12,%xmm10
punpckldq %xmm13,%xmm12
movdqa %xmm14,%xmm7
punpckldq %xmm15,%xmm14
punpckhdq %xmm13,%xmm10
punpckhdq %xmm15,%xmm7
movdqa %xmm12,%xmm13
punpcklqdq %xmm14,%xmm12
movdqa %xmm10,%xmm15
punpcklqdq %xmm7,%xmm10
punpckhqdq %xmm14,%xmm13
punpckhqdq %xmm7,%xmm15
paddd 192-256(%rcx),%xmm4
paddd 208-256(%rcx),%xmm5
paddd 224-256(%rcx),%xmm8
paddd 240-256(%rcx),%xmm9
movdqa %xmm6,32(%rsp)
movdqa %xmm11,48(%rsp)
movdqa %xmm4,%xmm14
punpckldq %xmm5,%xmm4
movdqa %xmm8,%xmm7
punpckldq %xmm9,%xmm8
punpckhdq %xmm5,%xmm14
punpckhdq %xmm9,%xmm7
movdqa %xmm4,%xmm5
punpcklqdq %xmm8,%xmm4
movdqa %xmm14,%xmm9
punpcklqdq %xmm7,%xmm14
punpckhqdq %xmm8,%xmm5
punpckhqdq %xmm7,%xmm9
paddd 256-256(%rcx),%xmm0
paddd 272-256(%rcx),%xmm1
paddd 288-256(%rcx),%xmm2
paddd 304-256(%rcx),%xmm3
movdqa %xmm0,%xmm8
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm8
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm8,%xmm3
punpcklqdq %xmm7,%xmm8
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
cmpq $256,%rdx
jb L$tail4x
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
leaq 128(%rsi),%rsi
pxor 16(%rsp),%xmm6
pxor %xmm13,%xmm11
pxor %xmm5,%xmm2
pxor %xmm1,%xmm7
movdqu %xmm6,64(%rdi)
movdqu 0(%rsi),%xmm6
movdqu %xmm11,80(%rdi)
movdqu 16(%rsi),%xmm11
movdqu %xmm2,96(%rdi)
movdqu 32(%rsi),%xmm2
movdqu %xmm7,112(%rdi)
leaq 128(%rdi),%rdi
movdqu 48(%rsi),%xmm7
pxor 32(%rsp),%xmm6
pxor %xmm10,%xmm11
pxor %xmm14,%xmm2
pxor %xmm8,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
leaq 128(%rsi),%rsi
pxor 48(%rsp),%xmm6
pxor %xmm15,%xmm11
pxor %xmm9,%xmm2
pxor %xmm3,%xmm7
movdqu %xmm6,64(%rdi)
movdqu %xmm11,80(%rdi)
movdqu %xmm2,96(%rdi)
movdqu %xmm7,112(%rdi)
leaq 128(%rdi),%rdi
subq $256,%rdx
jnz L$oop_outer4x
jmp L$done4x
L$tail4x:
cmpq $192,%rdx
jae L$192_or_more4x
cmpq $128,%rdx
jae L$128_or_more4x
cmpq $64,%rdx
jae L$64_or_more4x
xorq %r10,%r10
movdqa %xmm12,16(%rsp)
movdqa %xmm4,32(%rsp)
movdqa %xmm0,48(%rsp)
jmp L$oop_tail4x
.p2align 5
L$64_or_more4x:
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu %xmm11,16(%rdi)
movdqu %xmm2,32(%rdi)
movdqu %xmm7,48(%rdi)
je L$done4x
movdqa 16(%rsp),%xmm6
leaq 64(%rsi),%rsi
xorq %r10,%r10
movdqa %xmm6,0(%rsp)
movdqa %xmm13,16(%rsp)
leaq 64(%rdi),%rdi
movdqa %xmm5,32(%rsp)
subq $64,%rdx
movdqa %xmm1,48(%rsp)
jmp L$oop_tail4x
.p2align 5
L$128_or_more4x:
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
pxor 16(%rsp),%xmm6
pxor %xmm13,%xmm11
pxor %xmm5,%xmm2
pxor %xmm1,%xmm7
movdqu %xmm6,64(%rdi)
movdqu %xmm11,80(%rdi)
movdqu %xmm2,96(%rdi)
movdqu %xmm7,112(%rdi)
je L$done4x
movdqa 32(%rsp),%xmm6
leaq 128(%rsi),%rsi
xorq %r10,%r10
movdqa %xmm6,0(%rsp)
movdqa %xmm10,16(%rsp)
leaq 128(%rdi),%rdi
movdqa %xmm14,32(%rsp)
subq $128,%rdx
movdqa %xmm8,48(%rsp)
jmp L$oop_tail4x
.p2align 5
L$192_or_more4x:
movdqu 0(%rsi),%xmm6
movdqu 16(%rsi),%xmm11
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm7
pxor 0(%rsp),%xmm6
pxor %xmm12,%xmm11
pxor %xmm4,%xmm2
pxor %xmm0,%xmm7
movdqu %xmm6,0(%rdi)
movdqu 64(%rsi),%xmm6
movdqu %xmm11,16(%rdi)
movdqu 80(%rsi),%xmm11
movdqu %xmm2,32(%rdi)
movdqu 96(%rsi),%xmm2
movdqu %xmm7,48(%rdi)
movdqu 112(%rsi),%xmm7
leaq 128(%rsi),%rsi
pxor 16(%rsp),%xmm6
pxor %xmm13,%xmm11
pxor %xmm5,%xmm2
pxor %xmm1,%xmm7
movdqu %xmm6,64(%rdi)
movdqu 0(%rsi),%xmm6
movdqu %xmm11,80(%rdi)
movdqu 16(%rsi),%xmm11
movdqu %xmm2,96(%rdi)
movdqu 32(%rsi),%xmm2
movdqu %xmm7,112(%rdi)
leaq 128(%rdi),%rdi
movdqu 48(%rsi),%xmm7
pxor 32(%rsp),%xmm6
pxor %xmm10,%xmm11
pxor %xmm14,%xmm2
pxor %xmm8,%xmm7
movdqu %xmm6,0(%rdi)
movdqu %xmm11,16(%rdi)
movdqu %xmm2,32(%rdi)
movdqu %xmm7,48(%rdi)
je L$done4x
movdqa 48(%rsp),%xmm6
leaq 64(%rsi),%rsi
xorq %r10,%r10
movdqa %xmm6,0(%rsp)
movdqa %xmm15,16(%rsp)
leaq 64(%rdi),%rdi
movdqa %xmm9,32(%rsp)
subq $192,%rdx
movdqa %xmm3,48(%rsp)
L$oop_tail4x:
movzbl (%rsi,%r10,1),%eax
movzbl (%rsp,%r10,1),%ecx
leaq 1(%r10),%r10
xorl %ecx,%eax
movb %al,-1(%rdi,%r10,1)
decq %rdx
jnz L$oop_tail4x
L$done4x:
leaq (%r9),%rsp
L$4x_epilogue:
ret
.globl _ChaCha20_ctr32_avx2
.private_extern _ChaCha20_ctr32_avx2
.p2align 5
_ChaCha20_ctr32_avx2:
_CET_ENDBR
movq %rsp,%r9
subq $0x280+8,%rsp
andq $-32,%rsp
vzeroupper
vbroadcasti128 L$sigma(%rip),%ymm11
vbroadcasti128 (%rcx),%ymm3
vbroadcasti128 16(%rcx),%ymm15
vbroadcasti128 (%r8),%ymm7
leaq 256(%rsp),%rcx
leaq 512(%rsp),%rax
leaq L$rot16(%rip),%r10
leaq L$rot24(%rip),%r11
vpshufd $0x00,%ymm11,%ymm8
vpshufd $0x55,%ymm11,%ymm9
vmovdqa %ymm8,128-256(%rcx)
vpshufd $0xaa,%ymm11,%ymm10
vmovdqa %ymm9,160-256(%rcx)
vpshufd $0xff,%ymm11,%ymm11
vmovdqa %ymm10,192-256(%rcx)
vmovdqa %ymm11,224-256(%rcx)
vpshufd $0x00,%ymm3,%ymm0
vpshufd $0x55,%ymm3,%ymm1
vmovdqa %ymm0,256-256(%rcx)
vpshufd $0xaa,%ymm3,%ymm2
vmovdqa %ymm1,288-256(%rcx)
vpshufd $0xff,%ymm3,%ymm3
vmovdqa %ymm2,320-256(%rcx)
vmovdqa %ymm3,352-256(%rcx)
vpshufd $0x00,%ymm15,%ymm12
vpshufd $0x55,%ymm15,%ymm13
vmovdqa %ymm12,384-512(%rax)
vpshufd $0xaa,%ymm15,%ymm14
vmovdqa %ymm13,416-512(%rax)
vpshufd $0xff,%ymm15,%ymm15
vmovdqa %ymm14,448-512(%rax)
vmovdqa %ymm15,480-512(%rax)
vpshufd $0x00,%ymm7,%ymm4
vpshufd $0x55,%ymm7,%ymm5
vpaddd L$incy(%rip),%ymm4,%ymm4
vpshufd $0xaa,%ymm7,%ymm6
vmovdqa %ymm5,544-512(%rax)
vpshufd $0xff,%ymm7,%ymm7
vmovdqa %ymm6,576-512(%rax)
vmovdqa %ymm7,608-512(%rax)
jmp L$oop_enter8x
.p2align 5
L$oop_outer8x:
vmovdqa 128-256(%rcx),%ymm8
vmovdqa 160-256(%rcx),%ymm9
vmovdqa 192-256(%rcx),%ymm10
vmovdqa 224-256(%rcx),%ymm11
vmovdqa 256-256(%rcx),%ymm0
vmovdqa 288-256(%rcx),%ymm1
vmovdqa 320-256(%rcx),%ymm2
vmovdqa 352-256(%rcx),%ymm3
vmovdqa 384-512(%rax),%ymm12
vmovdqa 416-512(%rax),%ymm13
vmovdqa 448-512(%rax),%ymm14
vmovdqa 480-512(%rax),%ymm15
vmovdqa 512-512(%rax),%ymm4
vmovdqa 544-512(%rax),%ymm5
vmovdqa 576-512(%rax),%ymm6
vmovdqa 608-512(%rax),%ymm7
vpaddd L$eight(%rip),%ymm4,%ymm4
L$oop_enter8x:
vmovdqa %ymm14,64(%rsp)
vmovdqa %ymm15,96(%rsp)
vbroadcasti128 (%r10),%ymm15
vmovdqa %ymm4,512-512(%rax)
movl $10,%eax
jmp L$oop8x
.p2align 5
L$oop8x:
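// Same transposed scheme as the 4x path, but with 8 blocks per ymm register;
// the pshufb rotation masks are refetched via vbroadcasti128 from
// L$rot16/L$rot24 as needed.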
vpaddd %ymm0,%ymm8,%ymm8
vpxor %ymm4,%ymm8,%ymm4
vpshufb %ymm15,%ymm4,%ymm4
vpaddd %ymm1,%ymm9,%ymm9
vpxor %ymm5,%ymm9,%ymm5
vpshufb %ymm15,%ymm5,%ymm5
vpaddd %ymm4,%ymm12,%ymm12
vpxor %ymm0,%ymm12,%ymm0
vpslld $12,%ymm0,%ymm14
vpsrld $20,%ymm0,%ymm0
vpor %ymm0,%ymm14,%ymm0
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm13,%ymm1
vpslld $12,%ymm1,%ymm15
vpsrld $20,%ymm1,%ymm1
vpor %ymm1,%ymm15,%ymm1
vpaddd %ymm0,%ymm8,%ymm8
vpxor %ymm4,%ymm8,%ymm4
vpshufb %ymm14,%ymm4,%ymm4
vpaddd %ymm1,%ymm9,%ymm9
vpxor %ymm5,%ymm9,%ymm5
vpshufb %ymm14,%ymm5,%ymm5
vpaddd %ymm4,%ymm12,%ymm12
vpxor %ymm0,%ymm12,%ymm0
vpslld $7,%ymm0,%ymm15
vpsrld $25,%ymm0,%ymm0
vpor %ymm0,%ymm15,%ymm0
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm13,%ymm1
vpslld $7,%ymm1,%ymm14
vpsrld $25,%ymm1,%ymm1
vpor %ymm1,%ymm14,%ymm1
vmovdqa %ymm12,0(%rsp)
vmovdqa %ymm13,32(%rsp)
vmovdqa 64(%rsp),%ymm12
vmovdqa 96(%rsp),%ymm13
vpaddd %ymm2,%ymm10,%ymm10
vpxor %ymm6,%ymm10,%ymm6
vpshufb %ymm15,%ymm6,%ymm6
vpaddd %ymm3,%ymm11,%ymm11
vpxor %ymm7,%ymm11,%ymm7
vpshufb %ymm15,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm2,%ymm12,%ymm2
vpslld $12,%ymm2,%ymm14
vpsrld $20,%ymm2,%ymm2
vpor %ymm2,%ymm14,%ymm2
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm7,%ymm13,%ymm13
vpxor %ymm3,%ymm13,%ymm3
vpslld $12,%ymm3,%ymm15
vpsrld $20,%ymm3,%ymm3
vpor %ymm3,%ymm15,%ymm3
vpaddd %ymm2,%ymm10,%ymm10
vpxor %ymm6,%ymm10,%ymm6
vpshufb %ymm14,%ymm6,%ymm6
vpaddd %ymm3,%ymm11,%ymm11
vpxor %ymm7,%ymm11,%ymm7
vpshufb %ymm14,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm2,%ymm12,%ymm2
vpslld $7,%ymm2,%ymm15
vpsrld $25,%ymm2,%ymm2
vpor %ymm2,%ymm15,%ymm2
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm7,%ymm13,%ymm13
vpxor %ymm3,%ymm13,%ymm3
vpslld $7,%ymm3,%ymm14
vpsrld $25,%ymm3,%ymm3
vpor %ymm3,%ymm14,%ymm3
vpaddd %ymm1,%ymm8,%ymm8
vpxor %ymm7,%ymm8,%ymm7
vpshufb %ymm15,%ymm7,%ymm7
vpaddd %ymm2,%ymm9,%ymm9
vpxor %ymm4,%ymm9,%ymm4
vpshufb %ymm15,%ymm4,%ymm4
vpaddd %ymm7,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm1
vpslld $12,%ymm1,%ymm14
vpsrld $20,%ymm1,%ymm1
vpor %ymm1,%ymm14,%ymm1
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm4,%ymm13,%ymm13
vpxor %ymm2,%ymm13,%ymm2
vpslld $12,%ymm2,%ymm15
vpsrld $20,%ymm2,%ymm2
vpor %ymm2,%ymm15,%ymm2
vpaddd %ymm1,%ymm8,%ymm8
vpxor %ymm7,%ymm8,%ymm7
vpshufb %ymm14,%ymm7,%ymm7
vpaddd %ymm2,%ymm9,%ymm9
vpxor %ymm4,%ymm9,%ymm4
vpshufb %ymm14,%ymm4,%ymm4
vpaddd %ymm7,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm1
vpslld $7,%ymm1,%ymm15
vpsrld $25,%ymm1,%ymm1
vpor %ymm1,%ymm15,%ymm1
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm4,%ymm13,%ymm13
vpxor %ymm2,%ymm13,%ymm2
vpslld $7,%ymm2,%ymm14
vpsrld $25,%ymm2,%ymm2
vpor %ymm2,%ymm14,%ymm2
vmovdqa %ymm12,64(%rsp)
vmovdqa %ymm13,96(%rsp)
vmovdqa 0(%rsp),%ymm12
vmovdqa 32(%rsp),%ymm13
vpaddd %ymm3,%ymm10,%ymm10
vpxor %ymm5,%ymm10,%ymm5
vpshufb %ymm15,%ymm5,%ymm5
vpaddd %ymm0,%ymm11,%ymm11
vpxor %ymm6,%ymm11,%ymm6
vpshufb %ymm15,%ymm6,%ymm6
vpaddd %ymm5,%ymm12,%ymm12
vpxor %ymm3,%ymm12,%ymm3
vpslld $12,%ymm3,%ymm14
vpsrld $20,%ymm3,%ymm3
vpor %ymm3,%ymm14,%ymm3
vbroadcasti128 (%r11),%ymm14
vpaddd %ymm6,%ymm13,%ymm13
vpxor %ymm0,%ymm13,%ymm0
vpslld $12,%ymm0,%ymm15
vpsrld $20,%ymm0,%ymm0
vpor %ymm0,%ymm15,%ymm0
vpaddd %ymm3,%ymm10,%ymm10
vpxor %ymm5,%ymm10,%ymm5
vpshufb %ymm14,%ymm5,%ymm5
vpaddd %ymm0,%ymm11,%ymm11
vpxor %ymm6,%ymm11,%ymm6
vpshufb %ymm14,%ymm6,%ymm6
vpaddd %ymm5,%ymm12,%ymm12
vpxor %ymm3,%ymm12,%ymm3
vpslld $7,%ymm3,%ymm15
vpsrld $25,%ymm3,%ymm3
vpor %ymm3,%ymm15,%ymm3
vbroadcasti128 (%r10),%ymm15
vpaddd %ymm6,%ymm13,%ymm13
vpxor %ymm0,%ymm13,%ymm0
vpslld $7,%ymm0,%ymm14
vpsrld $25,%ymm0,%ymm0
vpor %ymm0,%ymm14,%ymm0
decl %eax
jnz L$oop8x
leaq 512(%rsp),%rax
vpaddd 128-256(%rcx),%ymm8,%ymm8
vpaddd 160-256(%rcx),%ymm9,%ymm9
vpaddd 192-256(%rcx),%ymm10,%ymm10
vpaddd 224-256(%rcx),%ymm11,%ymm11
vpunpckldq %ymm9,%ymm8,%ymm14
vpunpckldq %ymm11,%ymm10,%ymm15
vpunpckhdq %ymm9,%ymm8,%ymm8
vpunpckhdq %ymm11,%ymm10,%ymm10
vpunpcklqdq %ymm15,%ymm14,%ymm9
vpunpckhqdq %ymm15,%ymm14,%ymm14
vpunpcklqdq %ymm10,%ymm8,%ymm11
vpunpckhqdq %ymm10,%ymm8,%ymm8
vpaddd 256-256(%rcx),%ymm0,%ymm0
vpaddd 288-256(%rcx),%ymm1,%ymm1
vpaddd 320-256(%rcx),%ymm2,%ymm2
vpaddd 352-256(%rcx),%ymm3,%ymm3
vpunpckldq %ymm1,%ymm0,%ymm10
vpunpckldq %ymm3,%ymm2,%ymm15
vpunpckhdq %ymm1,%ymm0,%ymm0
vpunpckhdq %ymm3,%ymm2,%ymm2
vpunpcklqdq %ymm15,%ymm10,%ymm1
vpunpckhqdq %ymm15,%ymm10,%ymm10
vpunpcklqdq %ymm2,%ymm0,%ymm3
vpunpckhqdq %ymm2,%ymm0,%ymm0
vperm2i128 $0x20,%ymm1,%ymm9,%ymm15
vperm2i128 $0x31,%ymm1,%ymm9,%ymm1
vperm2i128 $0x20,%ymm10,%ymm14,%ymm9
vperm2i128 $0x31,%ymm10,%ymm14,%ymm10
vperm2i128 $0x20,%ymm3,%ymm11,%ymm14
vperm2i128 $0x31,%ymm3,%ymm11,%ymm3
vperm2i128 $0x20,%ymm0,%ymm8,%ymm11
vperm2i128 $0x31,%ymm0,%ymm8,%ymm0
vmovdqa %ymm15,0(%rsp)
vmovdqa %ymm9,32(%rsp)
vmovdqa 64(%rsp),%ymm15
vmovdqa 96(%rsp),%ymm9
vpaddd 384-512(%rax),%ymm12,%ymm12
vpaddd 416-512(%rax),%ymm13,%ymm13
vpaddd 448-512(%rax),%ymm15,%ymm15
vpaddd 480-512(%rax),%ymm9,%ymm9
vpunpckldq %ymm13,%ymm12,%ymm2
vpunpckldq %ymm9,%ymm15,%ymm8
vpunpckhdq %ymm13,%ymm12,%ymm12
vpunpckhdq %ymm9,%ymm15,%ymm15
vpunpcklqdq %ymm8,%ymm2,%ymm13
vpunpckhqdq %ymm8,%ymm2,%ymm2
vpunpcklqdq %ymm15,%ymm12,%ymm9
vpunpckhqdq %ymm15,%ymm12,%ymm12
vpaddd 512-512(%rax),%ymm4,%ymm4
vpaddd 544-512(%rax),%ymm5,%ymm5
vpaddd 576-512(%rax),%ymm6,%ymm6
vpaddd 608-512(%rax),%ymm7,%ymm7
vpunpckldq %ymm5,%ymm4,%ymm15
vpunpckldq %ymm7,%ymm6,%ymm8
vpunpckhdq %ymm5,%ymm4,%ymm4
vpunpckhdq %ymm7,%ymm6,%ymm6
vpunpcklqdq %ymm8,%ymm15,%ymm5
vpunpckhqdq %ymm8,%ymm15,%ymm15
vpunpcklqdq %ymm6,%ymm4,%ymm7
vpunpckhqdq %ymm6,%ymm4,%ymm4
vperm2i128 $0x20,%ymm5,%ymm13,%ymm8
vperm2i128 $0x31,%ymm5,%ymm13,%ymm5
vperm2i128 $0x20,%ymm15,%ymm2,%ymm13
vperm2i128 $0x31,%ymm15,%ymm2,%ymm15
vperm2i128 $0x20,%ymm7,%ymm9,%ymm2
vperm2i128 $0x31,%ymm7,%ymm9,%ymm7
vperm2i128 $0x20,%ymm4,%ymm12,%ymm9
vperm2i128 $0x31,%ymm4,%ymm12,%ymm4
vmovdqa 0(%rsp),%ymm6
vmovdqa 32(%rsp),%ymm12
cmpq $512,%rdx
jb L$tail8x
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
leaq 128(%rsi),%rsi
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
leaq 128(%rdi),%rdi
vpxor 0(%rsi),%ymm12,%ymm12
vpxor 32(%rsi),%ymm13,%ymm13
vpxor 64(%rsi),%ymm10,%ymm10
vpxor 96(%rsi),%ymm15,%ymm15
leaq 128(%rsi),%rsi
vmovdqu %ymm12,0(%rdi)
vmovdqu %ymm13,32(%rdi)
vmovdqu %ymm10,64(%rdi)
vmovdqu %ymm15,96(%rdi)
leaq 128(%rdi),%rdi
vpxor 0(%rsi),%ymm14,%ymm14
vpxor 32(%rsi),%ymm2,%ymm2
vpxor 64(%rsi),%ymm3,%ymm3
vpxor 96(%rsi),%ymm7,%ymm7
leaq 128(%rsi),%rsi
vmovdqu %ymm14,0(%rdi)
vmovdqu %ymm2,32(%rdi)
vmovdqu %ymm3,64(%rdi)
vmovdqu %ymm7,96(%rdi)
leaq 128(%rdi),%rdi
vpxor 0(%rsi),%ymm11,%ymm11
vpxor 32(%rsi),%ymm9,%ymm9
vpxor 64(%rsi),%ymm0,%ymm0
vpxor 96(%rsi),%ymm4,%ymm4
leaq 128(%rsi),%rsi
vmovdqu %ymm11,0(%rdi)
vmovdqu %ymm9,32(%rdi)
vmovdqu %ymm0,64(%rdi)
vmovdqu %ymm4,96(%rdi)
leaq 128(%rdi),%rdi
subq $512,%rdx
jnz L$oop_outer8x
jmp L$done8x
L$tail8x:
cmpq $448,%rdx
jae L$448_or_more8x
cmpq $384,%rdx
jae L$384_or_more8x
cmpq $320,%rdx
jae L$320_or_more8x
cmpq $256,%rdx
jae L$256_or_more8x
cmpq $192,%rdx
jae L$192_or_more8x
cmpq $128,%rdx
jae L$128_or_more8x
cmpq $64,%rdx
jae L$64_or_more8x
xorq %r10,%r10
vmovdqa %ymm6,0(%rsp)
vmovdqa %ymm8,32(%rsp)
jmp L$oop_tail8x
.p2align 5
L$64_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
je L$done8x
leaq 64(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm1,0(%rsp)
leaq 64(%rdi),%rdi
subq $64,%rdx
vmovdqa %ymm5,32(%rsp)
jmp L$oop_tail8x
.p2align 5
L$128_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
je L$done8x
leaq 128(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm12,0(%rsp)
leaq 128(%rdi),%rdi
subq $128,%rdx
vmovdqa %ymm13,32(%rsp)
jmp L$oop_tail8x
.p2align 5
L$192_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
je L$done8x
leaq 192(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm10,0(%rsp)
leaq 192(%rdi),%rdi
subq $192,%rdx
vmovdqa %ymm15,32(%rsp)
jmp L$oop_tail8x
.p2align 5
L$256_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
je L$done8x
leaq 256(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm14,0(%rsp)
leaq 256(%rdi),%rdi
subq $256,%rdx
vmovdqa %ymm2,32(%rsp)
jmp L$oop_tail8x
.p2align 5
L$320_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vpxor 256(%rsi),%ymm14,%ymm14
vpxor 288(%rsi),%ymm2,%ymm2
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
vmovdqu %ymm14,256(%rdi)
vmovdqu %ymm2,288(%rdi)
je L$done8x
leaq 320(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm3,0(%rsp)
leaq 320(%rdi),%rdi
subq $320,%rdx
vmovdqa %ymm7,32(%rsp)
jmp L$oop_tail8x
.p2align 5
L$384_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vpxor 256(%rsi),%ymm14,%ymm14
vpxor 288(%rsi),%ymm2,%ymm2
vpxor 320(%rsi),%ymm3,%ymm3
vpxor 352(%rsi),%ymm7,%ymm7
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
vmovdqu %ymm14,256(%rdi)
vmovdqu %ymm2,288(%rdi)
vmovdqu %ymm3,320(%rdi)
vmovdqu %ymm7,352(%rdi)
je L$done8x
leaq 384(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm11,0(%rsp)
leaq 384(%rdi),%rdi
subq $384,%rdx
vmovdqa %ymm9,32(%rsp)
jmp L$oop_tail8x
.p2align 5
L$448_or_more8x:
vpxor 0(%rsi),%ymm6,%ymm6
vpxor 32(%rsi),%ymm8,%ymm8
vpxor 64(%rsi),%ymm1,%ymm1
vpxor 96(%rsi),%ymm5,%ymm5
vpxor 128(%rsi),%ymm12,%ymm12
vpxor 160(%rsi),%ymm13,%ymm13
vpxor 192(%rsi),%ymm10,%ymm10
vpxor 224(%rsi),%ymm15,%ymm15
vpxor 256(%rsi),%ymm14,%ymm14
vpxor 288(%rsi),%ymm2,%ymm2
vpxor 320(%rsi),%ymm3,%ymm3
vpxor 352(%rsi),%ymm7,%ymm7
vpxor 384(%rsi),%ymm11,%ymm11
vpxor 416(%rsi),%ymm9,%ymm9
vmovdqu %ymm6,0(%rdi)
vmovdqu %ymm8,32(%rdi)
vmovdqu %ymm1,64(%rdi)
vmovdqu %ymm5,96(%rdi)
vmovdqu %ymm12,128(%rdi)
vmovdqu %ymm13,160(%rdi)
vmovdqu %ymm10,192(%rdi)
vmovdqu %ymm15,224(%rdi)
vmovdqu %ymm14,256(%rdi)
vmovdqu %ymm2,288(%rdi)
vmovdqu %ymm3,320(%rdi)
vmovdqu %ymm7,352(%rdi)
vmovdqu %ymm11,384(%rdi)
vmovdqu %ymm9,416(%rdi)
je L$done8x
leaq 448(%rsi),%rsi
xorq %r10,%r10
vmovdqa %ymm0,0(%rsp)
leaq 448(%rdi),%rdi
subq $448,%rdx
vmovdqa %ymm4,32(%rsp)
L$oop_tail8x:
movzbl (%rsi,%r10,1),%eax
movzbl (%rsp,%r10,1),%ecx
leaq 1(%r10),%r10
xorl %ecx,%eax
movb %al,-1(%rdi,%r10,1)
decq %rdx
jnz L$oop_tail8x
L$done8x:
vzeroall
leaq (%r9),%rsp
L$8x_epilogue:
ret
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/vpaes-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.section .rodata
.align 7 // totally strategic alignment
_vpaes_consts:
Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Key schedule constants
//
Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
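// (the .byte string above reads "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)")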
.align 2
.align 6
.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.def _vpaes_encrypt_preheat
.type 32
.endef
.align 4
_vpaes_encrypt_preheat:
adrp x10, Lk_inv
add x10, x10, :lo12:Lk_inv
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2
ret
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.def _vpaes_encrypt_core
.type 32
.endef
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward+16
add x11, x11, :lo12:Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b Lenc_entry
.align 4
Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
Lenc_entry:
// top of round
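// The S-box is evaluated arithmetically rather than looked up: the state is
// split into 4-bit nibbles (and/ushr), the GF(2^8) inversion is computed via
// GF(2^4) tables (tbl into Lk_inv), and the affine output map is folded into
// the sbo/sb1/sb2 tables -- so no load address ever depends on secret data.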
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
.def _vpaes_encrypt_2x
.type 32
.endef
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward+16
add x11, x11, :lo12:Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b Lenc_2x_entry
.align 4
Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
########################################################
## ##
## AES key schedule ##
## ##
########################################################
.def _vpaes_key_preheat
.type 32
.endef
.align 4
_vpaes_key_preheat:
adrp x10, Lk_inv
add x10, x10, :lo12:Lk_inv
movi v16.16b, #0x5b // Lk_s63
adrp x11, Lk_sb1
add x11, x11, :lo12:Lk_sb1
movi v17.16b, #0x0f // Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt
adrp x10, Lk_dksd
add x10, x10, :lo12:Lk_dksd
ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1
adrp x11, Lk_mc_forward
add x11, x11, :lo12:Lk_mc_forward
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9
ld1 {v8.2d}, [x10] // Lk_rcon
ld1 {v9.2d}, [x11] // Lk_mc_forward[0]
ret
.def _vpaes_schedule_core
.type 32
.endef
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, Lk_sr // lea Lk_sr(%rip),%r10
add x10, x10, :lo12:Lk_sr
add x8, x8, x10
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
cmp w1, #192 // cmp $192, %esi
b.hi Lschedule_256
b.eq Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
Lschedule_128:
mov x0, #10 // mov $10, %esi
Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
.align 4
Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align 4
Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, Lk_deskew // lea Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, :lo12:Lk_deskew
cbnz w3, Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, Lk_opt // lea Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, :lo12:Lk_opt
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.def _vpaes_schedule_192_smear
.type 32
.endef
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
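##
## Dword-lane model of the smear (a sketch; lanes listed high..low and
## '+' is XOR, matching the comments above):
##   t1 = (c, 0, 0, 0)                  // ins v1.s[3], v6.s[2]
##   t0 = (b, b, b, a)                  // dup + ins from v7
##   x6 ^= t1   ->  (c+d, c, 0, 0)
##   x6 ^= t0   ->  (b+c+d, b+c, b, a)
##   x0 = x6; then the low 64 bits of x6 are cleared
##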
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
.def _vpaes_schedule_round
.type 32
.endef
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
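##
## Note: the ext #15 pair above pulls the next rcon byte (the top byte
## of v8) into lane 0 of v1 while rotating v8 to queue the one after
## it. Since the dup leaves all four dwords of v0 equal, the
## whole-vector byte rotate (ext #1) rotates each 32-bit lane by
## 8 bits, i.e. the key schedule's RotWord.
##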
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.def _vpaes_schedule_transform
.type 32
.endef
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
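##
## Byte-wise model of the transform (a sketch; 'lo'/'hi' are the two
## 16-entry tables preloaded into v20/v21 from (%r11)):
##   out[i] = hi[x[i] >> 4] ^ lo[x[i] & 0x0F]
##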
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.def _vpaes_schedule_mangle
.type 32
.endef
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
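##
## The chained tbl lookups through v9 (which appears to hold the
## Lk_mc_forward pattern, preloaded by the caller per the commented-out
## vmovdqa above) rotate bytes within each column, and the XORs
## accumulate the 0,1,1,1 circulant described above. x8 walks backwards
## through the four 16-byte Lk_sr rows: adding 48 and masking is a
## subtract-16 mod 64, tracking the round number mod 4.
##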
.globl vpaes_set_encrypt_key
.def vpaes_set_encrypt_key
.type 32
.endef
.align 4
vpaes_set_encrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
	add	w9, w9, #5		// add	$5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl vpaes_ctr32_encrypt_blocks
.def vpaes_ctr32_encrypt_blocks
.type 32
.endef
.align 4
vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
b.ls Lctr32_done
Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi Lctr32_loop
Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
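// A C-level model of the CTR handling above (a sketch; the low 32 bits
// of the counter are big-endian and wrap mod 2^32, the upper 96 IV bits
// stay fixed; load_be32/store_be32 are notation, not real helpers):
//   uint32_t c = load_be32(iv + 12);
//   for (size_t i = 0; i < blocks; i++)
//       out[i] = in[i] ^ AES_encrypt(key, iv[0..11] || store_be32(c + i));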
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
mktmansour/MKT-KSA-Geolocation-Security
| 69,148
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha256-x86_64-macosx.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.globl _sha256_block_data_order_nohw
.private_extern _sha256_block_data_order_nohw
.p2align 4
_sha256_block_data_order_nohw:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp L$loop
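# Per-round math (FIPS 180-4; '+' is addition mod 2^32):
#   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
#   T2 = Sigma0(a) + Maj(a,b,c)
#   (a..h) <- (T1+T2, a, b, c, d+T1, e, f, g)
# The rorl chains fold each Sigma's three rotations into two XORs:
#   Sigma1(e) = ROTR6(e ^ ROTR5(e ^ ROTR14(e)))   (rorl $14, $5, $6)
#   Sigma0(a) = ROTR2(a ^ ROTR11(a ^ ROTR9(a)))   (rorl $9, $11, $2)
# Ch(e,f,g) is formed as g ^ (e & (f ^ g)), and Maj(a,b,c) as
# b ^ ((a^b) & (b^c)), caching a^b in %edi/%r15d across adjacent rounds.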
.p2align 4
L$loop:
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp L$rounds_16_xx
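# Message schedule for rounds 16..63:
#   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
#   sigma0(x) = ROTR7(x) ^ ROTR18(x) ^ (x >> 3)  = ROTR7(x ^ ROTR11(x)) ^ (x >> 3)
#   sigma1(x) = ROTR17(x) ^ ROTR19(x) ^ (x >> 10) = ROTR17(x ^ ROTR2(x)) ^ (x >> 10)
# The 16-word window lives in 0..60(%rsp): in the first round below,
# 4(%rsp) = W[i-15], 36(%rsp) = W[i-7], 56(%rsp) = W[i-2] and
# 0(%rsp) = W[i-16].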
.p2align 4
L$rounds_16_xx:
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
cmpb $0,3(%rbp)
jnz L$rounds_16_xx
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop
movq 88(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue:
ret
.section __DATA,__const
.p2align 6
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
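# Each 16-byte row of K256 is stored twice so the same indexing serves
# both 128-bit and 256-bit loads. The rows from K256+512 on are not
# round constants: 0x00010203..0c0d0e0f is the pshufb byte-swap mask
# used by the SSSE3/AVX loops, and the 0x03020100/0xffffffff rows are
# the lane-shuffle masks the AVX path loads into %xmm8/%xmm9 for the
# sigma computation. The .byte line above is the ASCII credit string
# "SHA256 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>".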
.text
.globl _sha256_block_data_order_hw
.private_extern _sha256_block_data_order_hw
.p2align 6
_sha256_block_data_order_hw:
_CET_ENDBR
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa 512-128(%rcx),%xmm7
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp L$oop_shaext
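# The raw .byte sequences below hand-encode SHA-NI and SSSE3
# instructions for assemblers that predate them:
#   15,56,203,/r    = 0F 38 CB  sha256rnds2 (two rounds; W+K implicitly in %xmm0)
#   15,56,204,/r    = 0F 38 CC  sha256msg1
#   15,56,205,/r    = 0F 38 CD  sha256msg2
#   102,15,56,0,/r  = 66 0F 38 00  pshufb
#   102,15,58,15,/r = 66 0F 3A 0F  palignr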
.p2align 4
L$oop_shaext:
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 102,15,56,0,231
movdqa %xmm2,%xmm10
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 102,15,56,0,239
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 102,15,56,0,247
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx
nop
.byte 15,56,203,202
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz L$oop_shaext
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.globl _sha256_block_data_order_ssse3
.private_extern _sha256_block_data_order_ssse3
.p2align 6
_sha256_block_data_order_ssse3:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue_ssse3:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp L$loop_ssse3
.p2align 4
L$loop_ssse3:
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp L$ssse3_00_47
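# L$ssse3_00_47 interleaves the vector message schedule with the scalar
# rounds: each pass computes 16 new W words (four per xmm register,
# using the same sigma0/sigma1 identities as the scalar path, spelled
# out with psrld/pslld/psrlq/pxor) while the integer unit retires 16
# rounds against the W+K values parked at 0..48(%rsp).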
.p2align 4
L$ssse3_00_47:
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne L$ssse3_00_47
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop_ssse3
movq 88(%rsp),%rsi
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_ssse3:
ret
.globl _sha256_block_data_order_avx
.private_extern _sha256_block_data_order_avx
.p2align 6
_sha256_block_data_order_avx:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
L$prologue_avx:
vzeroupper
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp L$loop_avx
.p2align 4
L$loop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp L$avx_00_47
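# Same interleave as the SSSE3 path, but with nondestructive
# three-operand AVX forms and shrdl in place of rorl: with identical
# source and destination, shrdl $n,%reg,%reg is a 32-bit rotate right
# by n.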
.p2align 4
L$avx_00_47:
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
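// Loop sentinel: the probed byte is the high byte of a K256 constant
// (always nonzero) until %rbp has walked past the 512-byte constant
// table into the trailing shuffle-mask data, so this runs three passes
// (rounds 0-47); the last 16 rounds follow with the schedule already
// on the stack.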
cmpb $0,131(%rbp)
jne L$avx_00_47
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb L$loop_avx
movq 88(%rsp),%rsi
vzeroupper
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$epilogue_avx:
ret
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 48,645
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha512-x86_64-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,@function
.align 16
sha512_block_data_order_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
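// %rdx arrives as the number of 128-byte blocks; shlq $4 plus the
// 8-scaled leaq turn it into the end-of-input pointer. The frame holds
// sixteen 64-bit schedule words at 0..127(%rsp), the saved
// ctx/input/end pointers at 128(%rsp), and the caller's %rsp at 152(%rsp).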
shlq $4,%rdx
subq $128+32,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
.Lprologue:
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp .Lloop
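// One iteration per 128-byte block. Rounds 0-15 bswap the input words
// and consume them directly, parking each in the stack window; %rbp
// walks the K512 table with an alternating 8/24-byte stride, skipping
// the duplicated rows kept for the SIMD code.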
.align 16
.Lloop:
movq %rbx,%rdi
leaq K512(%rip),%rbp
xorq %rcx,%rdi
movq 0(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 8(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 16(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 24(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 32(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 40(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 48(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 56(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
addq %r14,%rax
movq 64(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
addq %r14,%r11
movq 72(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
addq %r14,%r10
movq 80(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
addq %r14,%r9
movq 88(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
addq %r14,%r8
movq 96(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
addq %r14,%rdx
movq 104(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
addq %r14,%rcx
movq 112(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
addq %r14,%rbx
movq 120(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
jmp .Lrounds_16_xx
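// Rounds 16-79: each word is computed on the fly from the 16-entry
// circular window as W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16],
// with sigma0(x) = ROTR1^ROTR8^SHR7 and sigma1(x) = ROTR19^ROTR61^SHR6
// realized by the rorq/shrq/xorq sequences below.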
.align 16
.Lrounds_16_xx:
movq 8(%rsp),%r13
movq 112(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 72(%rsp),%r12
addq 0(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,0(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 16(%rsp),%r13
movq 120(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 80(%rsp),%r12
addq 8(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,8(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 24(%rsp),%r13
movq 0(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 88(%rsp),%r12
addq 16(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,16(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 32(%rsp),%r13
movq 8(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 96(%rsp),%r12
addq 24(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,24(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 40(%rsp),%r13
movq 16(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 104(%rsp),%r12
addq 32(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,32(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 48(%rsp),%r13
movq 24(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 112(%rsp),%r12
addq 40(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,40(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 56(%rsp),%r13
movq 32(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 120(%rsp),%r12
addq 48(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,48(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 64(%rsp),%r13
movq 40(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 0(%rsp),%r12
addq 56(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,56(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
movq 72(%rsp),%r13
movq 48(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rax
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 8(%rsp),%r12
addq 64(%rsp),%r12
movq %r8,%r13
addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
xorq %r8,%r13
rorq $5,%r14
xorq %r10,%r15
movq %r12,64(%rsp)
xorq %rax,%r14
andq %r8,%r15
rorq $4,%r13
addq %r11,%r12
xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
addq %r15,%r12
movq %rax,%r15
addq (%rbp),%r12
xorq %rax,%r14
xorq %rbx,%r15
rorq $14,%r13
movq %rbx,%r11
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
leaq 8(%rbp),%rbp
movq 80(%rsp),%r13
movq 56(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r11
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 16(%rsp),%r12
addq 72(%rsp),%r12
movq %rdx,%r13
addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
movq %r8,%rdi
xorq %rdx,%r13
rorq $5,%r14
xorq %r9,%rdi
movq %r12,72(%rsp)
xorq %r11,%r14
andq %rdx,%rdi
rorq $4,%r13
addq %r10,%r12
xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
addq %rdi,%r12
movq %r11,%rdi
addq (%rbp),%r12
xorq %r11,%r14
xorq %rax,%rdi
rorq $14,%r13
movq %rax,%r10
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
leaq 24(%rbp),%rbp
movq 88(%rsp),%r13
movq 64(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r10
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 24(%rsp),%r12
addq 80(%rsp),%r12
movq %rcx,%r13
addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
xorq %rcx,%r13
rorq $5,%r14
xorq %r8,%r15
movq %r12,80(%rsp)
xorq %r10,%r14
andq %rcx,%r15
rorq $4,%r13
addq %r9,%r12
xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
addq %r15,%r12
movq %r10,%r15
addq (%rbp),%r12
xorq %r10,%r14
xorq %r11,%r15
rorq $14,%r13
movq %r11,%r9
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
leaq 8(%rbp),%rbp
movq 96(%rsp),%r13
movq 72(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%r9
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 32(%rsp),%r12
addq 88(%rsp),%r12
movq %rbx,%r13
addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
movq %rcx,%rdi
xorq %rbx,%r13
rorq $5,%r14
xorq %rdx,%rdi
movq %r12,88(%rsp)
xorq %r9,%r14
andq %rbx,%rdi
rorq $4,%r13
addq %r8,%r12
xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
addq %rdi,%r12
movq %r9,%rdi
addq (%rbp),%r12
xorq %r9,%r14
xorq %r10,%rdi
rorq $14,%r13
movq %r10,%r8
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
leaq 24(%rbp),%rbp
movq 104(%rsp),%r13
movq 80(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%r8
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 40(%rsp),%r12
addq 96(%rsp),%r12
movq %rax,%r13
addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
xorq %rax,%r13
rorq $5,%r14
xorq %rcx,%r15
movq %r12,96(%rsp)
xorq %r8,%r14
andq %rax,%r15
rorq $4,%r13
addq %rdx,%r12
xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
addq %r15,%r12
movq %r8,%r15
addq (%rbp),%r12
xorq %r8,%r14
xorq %r9,%r15
rorq $14,%r13
movq %r9,%rdx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
leaq 8(%rbp),%rbp
movq 112(%rsp),%r13
movq 88(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rdx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 48(%rsp),%r12
addq 104(%rsp),%r12
movq %r11,%r13
addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
movq %rax,%rdi
xorq %r11,%r13
rorq $5,%r14
xorq %rbx,%rdi
movq %r12,104(%rsp)
xorq %rdx,%r14
andq %r11,%rdi
rorq $4,%r13
addq %rcx,%r12
xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
addq %rdi,%r12
movq %rdx,%rdi
addq (%rbp),%r12
xorq %rdx,%r14
xorq %r8,%rdi
rorq $14,%r13
movq %r8,%rcx
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
leaq 24(%rbp),%rbp
movq 120(%rsp),%r13
movq 96(%rsp),%r15
movq %r13,%r12
rorq $7,%r13
addq %r14,%rcx
movq %r15,%r14
rorq $42,%r15
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
xorq %r13,%r12
xorq %r14,%r15
addq 56(%rsp),%r12
addq 112(%rsp),%r12
movq %r10,%r13
addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
xorq %r10,%r13
rorq $5,%r14
xorq %rax,%r15
movq %r12,112(%rsp)
xorq %rcx,%r14
andq %r10,%r15
rorq $4,%r13
addq %rbx,%r12
xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
addq %r15,%r12
movq %rcx,%r15
addq (%rbp),%r12
xorq %rcx,%r14
xorq %rdx,%r15
rorq $14,%r13
movq %rdx,%rbx
andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
leaq 8(%rbp),%rbp
movq 0(%rsp),%r13
movq 104(%rsp),%rdi
movq %r13,%r12
rorq $7,%r13
addq %r14,%rbx
movq %rdi,%r14
rorq $42,%rdi
xorq %r12,%r13
shrq $7,%r12
rorq $1,%r13
xorq %r14,%rdi
shrq $6,%r14
rorq $19,%rdi
xorq %r13,%r12
xorq %r14,%rdi
addq 64(%rsp),%r12
addq 120(%rsp),%r12
movq %r9,%r13
addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
movq %r10,%rdi
xorq %r9,%r13
rorq $5,%r14
xorq %r11,%rdi
movq %r12,120(%rsp)
xorq %rbx,%r14
andq %r9,%rdi
rorq $4,%r13
addq %rax,%r12
xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
addq %rdi,%r12
movq %rbx,%rdi
addq (%rbp),%r12
xorq %rbx,%r14
xorq %rcx,%rdi
rorq $14,%r13
movq %rcx,%rax
andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
leaq 24(%rbp),%rbp
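// After 80 rounds %rbp points at the trailing byte-swap mask
// (0x0001020304050607...); its byte 7 is zero, whereas every K512
// constant has a nonzero top byte, so this test ends the round loop.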
cmpb $0,7(%rbp)
jnz .Lrounds_16_xx
movq 128+0(%rsp),%rdi
addq %r14,%rax
leaq 128(%rsi),%rsi
addq 0(%rdi),%rax
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb .Lloop
movq 152(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
.section .rodata
.align 64
.type K512,@object
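// Each row of two round constants is stored twice, evidently so the
// wider SIMD paths can pull a constant pair across a full vector
// register in one aligned load; the scalar code's 8/24 stride skips
// the duplicates. The two identical quads at the end are the vpshufb
// byte-swap mask.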
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
.quad 0x0001020304050607,0x08090a0b0c0d0e0f
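// ASCII: "SHA512 block transform for x86_64, CRYPTOGAMS by
// <appro@openssl.org>", NUL-terminated.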
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.globl sha512_block_data_order_avx
.hidden sha512_block_data_order_avx
.type sha512_block_data_order_avx,@function
.align 64
sha512_block_data_order_avx:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $160,%rsp
leaq (%rsi,%rdx,8),%rdx
andq $-64,%rsp
movq %rdi,128+0(%rsp)
movq %rsi,128+8(%rsp)
movq %rdx,128+16(%rsp)
movq %rax,152(%rsp)
.cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
movq 24(%rdi),%rdx
movq 32(%rdi),%r8
movq 40(%rdi),%r9
movq 48(%rdi),%r10
movq 56(%rdi),%r11
jmp .Lloop_avx
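// Per-block setup: load the 128-byte block into %xmm0-%xmm7, byte-swap
// each lane with the mask in %xmm11, pre-add the first sixteen K512
// constants, and spill the resulting W+K values to the stack window.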
.align 16
.Lloop_avx:
vmovdqa K512+1280(%rip),%xmm11
vmovdqu 0(%rsi),%xmm0
leaq K512+128(%rip),%rbp
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vpshufb %xmm11,%xmm0,%xmm0
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm11,%xmm1,%xmm1
vmovdqu 64(%rsi),%xmm4
vpshufb %xmm11,%xmm2,%xmm2
vmovdqu 80(%rsi),%xmm5
vpshufb %xmm11,%xmm3,%xmm3
vmovdqu 96(%rsi),%xmm6
vpshufb %xmm11,%xmm4,%xmm4
vmovdqu 112(%rsi),%xmm7
vpshufb %xmm11,%xmm5,%xmm5
vpaddq -128(%rbp),%xmm0,%xmm8
vpshufb %xmm11,%xmm6,%xmm6
vpaddq -96(%rbp),%xmm1,%xmm9
vpshufb %xmm11,%xmm7,%xmm7
vpaddq -64(%rbp),%xmm2,%xmm10
vpaddq -32(%rbp),%xmm3,%xmm11
vmovdqa %xmm8,0(%rsp)
vpaddq 0(%rbp),%xmm4,%xmm8
vmovdqa %xmm9,16(%rsp)
vpaddq 32(%rbp),%xmm5,%xmm9
vmovdqa %xmm10,32(%rsp)
vpaddq 64(%rbp),%xmm6,%xmm10
vmovdqa %xmm11,48(%rsp)
vpaddq 96(%rbp),%xmm7,%xmm11
vmovdqa %xmm8,64(%rsp)
movq %rax,%r14
vmovdqa %xmm9,80(%rsp)
movq %rbx,%rdi
vmovdqa %xmm10,96(%rsp)
xorq %rcx,%rdi
vmovdqa %xmm11,112(%rsp)
movq %r8,%r13
jmp .Lavx_00_47
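// Each pass: 16 scalar rounds interleaved with the vector
// sigma0/sigma1 updates of %xmm0-%xmm7 for the next 16 message words.
// addq $256,%rbp steps over 16 constants; the body then addresses them
// at -128(%rbp)..96(%rbp).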
.align 16
.Lavx_00_47:
addq $256,%rbp
vpalignr $8,%xmm0,%xmm1,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm4,%xmm5,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm0,%xmm0
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 0(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm7,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm7,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm0,%xmm0
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm7,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 8(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm0,%xmm0
xorq %r11,%r14
addq %r13,%r10
vpaddq -128(%rbp),%xmm0,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,0(%rsp)
vpalignr $8,%xmm1,%xmm2,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm5,%xmm6,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm1,%xmm1
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 16(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm0,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm0,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm1,%xmm1
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm0,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 24(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm1,%xmm1
xorq %r9,%r14
addq %r13,%r8
vpaddq -96(%rbp),%xmm1,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,16(%rsp)
vpalignr $8,%xmm2,%xmm3,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm6,%xmm7,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm2,%xmm2
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 32(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm1,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm1,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm2,%xmm2
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm1,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 40(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm2,%xmm2
xorq %rdx,%r14
addq %r13,%rcx
vpaddq -64(%rbp),%xmm2,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,32(%rsp)
vpalignr $8,%xmm3,%xmm4,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm7,%xmm0,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm3,%xmm3
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 48(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm2,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm2,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm3,%xmm3
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm2,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 56(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm3,%xmm3
xorq %rbx,%r14
addq %r13,%rax
vpaddq -32(%rbp),%xmm3,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,48(%rsp)
vpalignr $8,%xmm4,%xmm5,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rax
vpalignr $8,%xmm0,%xmm1,%xmm11
movq %r9,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r8,%r13
xorq %r10,%r12
vpaddq %xmm11,%xmm4,%xmm4
shrdq $4,%r13,%r13
xorq %rax,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r8,%r12
xorq %r8,%r13
vpsllq $56,%xmm8,%xmm9
addq 64(%rsp),%r11
movq %rax,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r10,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rbx,%r15
addq %r12,%r11
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rax,%r14
addq %r13,%r11
vpxor %xmm10,%xmm8,%xmm8
xorq %rbx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm3,%xmm11
addq %r11,%rdx
addq %rdi,%r11
vpxor %xmm9,%xmm8,%xmm8
movq %rdx,%r13
addq %r11,%r14
vpsllq $3,%xmm3,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r11
vpaddq %xmm8,%xmm4,%xmm4
movq %r8,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm3,%xmm9
xorq %rdx,%r13
xorq %r9,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r11,%r14
vpsllq $42,%xmm10,%xmm10
andq %rdx,%r12
xorq %rdx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 72(%rsp),%r10
movq %r11,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r9,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rax,%rdi
addq %r12,%r10
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm4,%xmm4
xorq %r11,%r14
addq %r13,%r10
vpaddq 0(%rbp),%xmm4,%xmm10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
vmovdqa %xmm10,64(%rsp)
vpalignr $8,%xmm5,%xmm6,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r10
vpalignr $8,%xmm1,%xmm2,%xmm11
movq %rdx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rcx,%r13
xorq %r8,%r12
vpaddq %xmm11,%xmm5,%xmm5
shrdq $4,%r13,%r13
xorq %r10,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rcx,%r12
xorq %rcx,%r13
vpsllq $56,%xmm8,%xmm9
addq 80(%rsp),%r9
movq %r10,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %r8,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r11,%r15
addq %r12,%r9
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r10,%r14
addq %r13,%r9
vpxor %xmm10,%xmm8,%xmm8
xorq %r11,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm4,%xmm11
addq %r9,%rbx
addq %rdi,%r9
vpxor %xmm9,%xmm8,%xmm8
movq %rbx,%r13
addq %r9,%r14
vpsllq $3,%xmm4,%xmm10
shrdq $23,%r13,%r13
movq %r14,%r9
vpaddq %xmm8,%xmm5,%xmm5
movq %rcx,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm4,%xmm9
xorq %rbx,%r13
xorq %rdx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %r9,%r14
vpsllq $42,%xmm10,%xmm10
andq %rbx,%r12
xorq %rbx,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 88(%rsp),%r8
movq %r9,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rdx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r10,%rdi
addq %r12,%r8
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm5,%xmm5
xorq %r9,%r14
addq %r13,%r8
vpaddq 32(%rbp),%xmm5,%xmm10
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
vmovdqa %xmm10,80(%rsp)
vpalignr $8,%xmm6,%xmm7,%xmm8
shrdq $23,%r13,%r13
movq %r14,%r8
vpalignr $8,%xmm2,%xmm3,%xmm11
movq %rbx,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %rax,%r13
xorq %rcx,%r12
vpaddq %xmm11,%xmm6,%xmm6
shrdq $4,%r13,%r13
xorq %r8,%r14
vpsrlq $7,%xmm8,%xmm11
andq %rax,%r12
xorq %rax,%r13
vpsllq $56,%xmm8,%xmm9
addq 96(%rsp),%rdx
movq %r8,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rcx,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %r9,%r15
addq %r12,%rdx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %r8,%r14
addq %r13,%rdx
vpxor %xmm10,%xmm8,%xmm8
xorq %r9,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm5,%xmm11
addq %rdx,%r11
addq %rdi,%rdx
vpxor %xmm9,%xmm8,%xmm8
movq %r11,%r13
addq %rdx,%r14
vpsllq $3,%xmm5,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rdx
vpaddq %xmm8,%xmm6,%xmm6
movq %rax,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm5,%xmm9
xorq %r11,%r13
xorq %rbx,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rdx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r11,%r12
xorq %r11,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 104(%rsp),%rcx
movq %rdx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %rbx,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %r8,%rdi
addq %r12,%rcx
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm6,%xmm6
xorq %rdx,%r14
addq %r13,%rcx
vpaddq 64(%rbp),%xmm6,%xmm10
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
vmovdqa %xmm10,96(%rsp)
vpalignr $8,%xmm7,%xmm0,%xmm8
shrdq $23,%r13,%r13
movq %r14,%rcx
vpalignr $8,%xmm3,%xmm4,%xmm11
movq %r11,%r12
shrdq $5,%r14,%r14
vpsrlq $1,%xmm8,%xmm10
xorq %r10,%r13
xorq %rax,%r12
vpaddq %xmm11,%xmm7,%xmm7
shrdq $4,%r13,%r13
xorq %rcx,%r14
vpsrlq $7,%xmm8,%xmm11
andq %r10,%r12
xorq %r10,%r13
vpsllq $56,%xmm8,%xmm9
addq 112(%rsp),%rbx
movq %rcx,%r15
vpxor %xmm10,%xmm11,%xmm8
xorq %rax,%r12
shrdq $6,%r14,%r14
vpsrlq $7,%xmm10,%xmm10
xorq %rdx,%r15
addq %r12,%rbx
vpxor %xmm9,%xmm8,%xmm8
shrdq $14,%r13,%r13
andq %r15,%rdi
vpsllq $7,%xmm9,%xmm9
xorq %rcx,%r14
addq %r13,%rbx
vpxor %xmm10,%xmm8,%xmm8
xorq %rdx,%rdi
shrdq $28,%r14,%r14
vpsrlq $6,%xmm6,%xmm11
addq %rbx,%r9
addq %rdi,%rbx
vpxor %xmm9,%xmm8,%xmm8
movq %r9,%r13
addq %rbx,%r14
vpsllq $3,%xmm6,%xmm10
shrdq $23,%r13,%r13
movq %r14,%rbx
vpaddq %xmm8,%xmm7,%xmm7
movq %r10,%r12
shrdq $5,%r14,%r14
vpsrlq $19,%xmm6,%xmm9
xorq %r9,%r13
xorq %r11,%r12
vpxor %xmm10,%xmm11,%xmm11
shrdq $4,%r13,%r13
xorq %rbx,%r14
vpsllq $42,%xmm10,%xmm10
andq %r9,%r12
xorq %r9,%r13
vpxor %xmm9,%xmm11,%xmm11
addq 120(%rsp),%rax
movq %rbx,%rdi
vpsrlq $42,%xmm9,%xmm9
xorq %r11,%r12
shrdq $6,%r14,%r14
vpxor %xmm10,%xmm11,%xmm11
xorq %rcx,%rdi
addq %r12,%rax
vpxor %xmm9,%xmm11,%xmm11
shrdq $14,%r13,%r13
andq %rdi,%r15
vpaddq %xmm11,%xmm7,%xmm7
xorq %rbx,%r14
addq %r13,%rax
vpaddq 96(%rbp),%xmm7,%xmm10
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
vmovdqa %xmm10,112(%rsp)
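// Same sentinel idea as the scalar loop: the byte at 135(%rbp) is the
// top byte of a K512 constant until the pointer has run past the table
// into the byte-swap mask, so the schedule loop exits after four passes
// (rounds 0-63), leaving the last 16 rounds to run straight-line below.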
cmpb $0,135(%rbp)
jne .Lavx_00_47
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 0(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 8(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 16(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 24(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 32(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 40(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 48(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 56(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
shrdq $23,%r13,%r13
movq %r14,%rax
movq %r9,%r12
shrdq $5,%r14,%r14
xorq %r8,%r13
xorq %r10,%r12
shrdq $4,%r13,%r13
xorq %rax,%r14
andq %r8,%r12
xorq %r8,%r13
addq 64(%rsp),%r11
movq %rax,%r15
xorq %r10,%r12
shrdq $6,%r14,%r14
xorq %rbx,%r15
addq %r12,%r11
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rax,%r14
addq %r13,%r11
xorq %rbx,%rdi
shrdq $28,%r14,%r14
addq %r11,%rdx
addq %rdi,%r11
movq %rdx,%r13
addq %r11,%r14
shrdq $23,%r13,%r13
movq %r14,%r11
movq %r8,%r12
shrdq $5,%r14,%r14
xorq %rdx,%r13
xorq %r9,%r12
shrdq $4,%r13,%r13
xorq %r11,%r14
andq %rdx,%r12
xorq %rdx,%r13
addq 72(%rsp),%r10
movq %r11,%rdi
xorq %r9,%r12
shrdq $6,%r14,%r14
xorq %rax,%rdi
addq %r12,%r10
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r11,%r14
addq %r13,%r10
xorq %rax,%r15
shrdq $28,%r14,%r14
addq %r10,%rcx
addq %r15,%r10
movq %rcx,%r13
addq %r10,%r14
shrdq $23,%r13,%r13
movq %r14,%r10
movq %rdx,%r12
shrdq $5,%r14,%r14
xorq %rcx,%r13
xorq %r8,%r12
shrdq $4,%r13,%r13
xorq %r10,%r14
andq %rcx,%r12
xorq %rcx,%r13
addq 80(%rsp),%r9
movq %r10,%r15
xorq %r8,%r12
shrdq $6,%r14,%r14
xorq %r11,%r15
addq %r12,%r9
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r10,%r14
addq %r13,%r9
xorq %r11,%rdi
shrdq $28,%r14,%r14
addq %r9,%rbx
addq %rdi,%r9
movq %rbx,%r13
addq %r9,%r14
shrdq $23,%r13,%r13
movq %r14,%r9
movq %rcx,%r12
shrdq $5,%r14,%r14
xorq %rbx,%r13
xorq %rdx,%r12
shrdq $4,%r13,%r13
xorq %r9,%r14
andq %rbx,%r12
xorq %rbx,%r13
addq 88(%rsp),%r8
movq %r9,%rdi
xorq %rdx,%r12
shrdq $6,%r14,%r14
xorq %r10,%rdi
addq %r12,%r8
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %r9,%r14
addq %r13,%r8
xorq %r10,%r15
shrdq $28,%r14,%r14
addq %r8,%rax
addq %r15,%r8
movq %rax,%r13
addq %r8,%r14
shrdq $23,%r13,%r13
movq %r14,%r8
movq %rbx,%r12
shrdq $5,%r14,%r14
xorq %rax,%r13
xorq %rcx,%r12
shrdq $4,%r13,%r13
xorq %r8,%r14
andq %rax,%r12
xorq %rax,%r13
addq 96(%rsp),%rdx
movq %r8,%r15
xorq %rcx,%r12
shrdq $6,%r14,%r14
xorq %r9,%r15
addq %r12,%rdx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %r8,%r14
addq %r13,%rdx
xorq %r9,%rdi
shrdq $28,%r14,%r14
addq %rdx,%r11
addq %rdi,%rdx
movq %r11,%r13
addq %rdx,%r14
shrdq $23,%r13,%r13
movq %r14,%rdx
movq %rax,%r12
shrdq $5,%r14,%r14
xorq %r11,%r13
xorq %rbx,%r12
shrdq $4,%r13,%r13
xorq %rdx,%r14
andq %r11,%r12
xorq %r11,%r13
addq 104(%rsp),%rcx
movq %rdx,%rdi
xorq %rbx,%r12
shrdq $6,%r14,%r14
xorq %r8,%rdi
addq %r12,%rcx
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rdx,%r14
addq %r13,%rcx
xorq %r8,%r15
shrdq $28,%r14,%r14
addq %rcx,%r10
addq %r15,%rcx
movq %r10,%r13
addq %rcx,%r14
shrdq $23,%r13,%r13
movq %r14,%rcx
movq %r11,%r12
shrdq $5,%r14,%r14
xorq %r10,%r13
xorq %rax,%r12
shrdq $4,%r13,%r13
xorq %rcx,%r14
andq %r10,%r12
xorq %r10,%r13
addq 112(%rsp),%rbx
movq %rcx,%r15
xorq %rax,%r12
shrdq $6,%r14,%r14
xorq %rdx,%r15
addq %r12,%rbx
shrdq $14,%r13,%r13
andq %r15,%rdi
xorq %rcx,%r14
addq %r13,%rbx
xorq %rdx,%rdi
shrdq $28,%r14,%r14
addq %rbx,%r9
addq %rdi,%rbx
movq %r9,%r13
addq %rbx,%r14
shrdq $23,%r13,%r13
movq %r14,%rbx
movq %r10,%r12
shrdq $5,%r14,%r14
xorq %r9,%r13
xorq %r11,%r12
shrdq $4,%r13,%r13
xorq %rbx,%r14
andq %r9,%r12
xorq %r9,%r13
addq 120(%rsp),%rax
movq %rbx,%rdi
xorq %r11,%r12
shrdq $6,%r14,%r14
xorq %rcx,%rdi
addq %r12,%rax
shrdq $14,%r13,%r13
andq %rdi,%r15
xorq %rbx,%r14
addq %r13,%rax
xorq %rcx,%r15
shrdq $28,%r14,%r14
addq %rax,%r8
addq %r15,%rax
movq %r8,%r13
addq %rax,%r14
movq 128+0(%rsp),%rdi
movq %r14,%rax
addq 0(%rdi),%rax
leaq 128(%rsi),%rsi
addq 8(%rdi),%rbx
addq 16(%rdi),%rcx
addq 24(%rdi),%rdx
addq 32(%rdi),%r8
addq 40(%rdi),%r9
addq 48(%rdi),%r10
addq 56(%rdi),%r11
cmpq 128+16(%rsp),%rsi
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
movq %rcx,16(%rdi)
movq %rdx,24(%rdi)
movq %r8,32(%rdi)
movq %r9,40(%rdi)
movq %r10,48(%rdi)
movq %r11,56(%rdi)
jb .Lloop_avx
movq 152(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha512_block_data_order_avx,.-sha512_block_data_order_avx
#endif
| mktmansour/MKT-KSA-Geolocation-Security | 48,620 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/x86_64-mont5-macosx.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
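// bn_mul4x_mont_gather5: 4-way unrolled Montgomery multiplication. The
// second operand is fetched from a pre-scattered power table (see
// bn_scatter5/bn_gather5 below) with a data-independent access pattern,
// so the table index does not leak through the cache.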
.globl _bn_mul4x_mont_gather5
.private_extern _bn_mul4x_mont_gather5
.p2align 5
_bn_mul4x_mont_gather5:
_CET_ENDBR
.byte 0x67
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mul4x_prologue:
.byte 0x67
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$mul4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$mul4xsp_done
.p2align 5
L$mul4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$mul4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mul4x_page_walk
jmp L$mul4x_page_walk_done
L$mul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mul4x_page_walk
L$mul4x_page_walk_done:
negq %r9
movq %rax,40(%rsp)
L$mul4x_body:
call mul4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul4x_epilogue:
ret
.p2align 5
mul4x_internal:
shlq $5,%r9
movd 8(%rax),%xmm5
leaq L$inc(%rip),%rax
leaq 128(%rdx,%r9,1),%r13
shrq $5,%r9
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r9,1),%r10
leaq 128(%rdx),%r12
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67,0x67
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
.byte 0x67
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
.byte 0x67
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%r12),%xmm0
pand 80(%r12),%xmm1
pand 96(%r12),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%r12),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%r12),%xmm4
movdqa -112(%r12),%xmm5
movdqa -96(%r12),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%r12),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%r12),%xmm4
movdqa -48(%r12),%xmm5
movdqa -32(%r12),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%r12),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%r12),%xmm4
movdqa 16(%r12),%xmm5
movdqa 32(%r12),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%r12),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
por %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq %r13,16+8(%rsp)
movq %rdi,56+8(%rsp)
movq (%r8),%r8
movq (%rsi),%rax
leaq (%rsi,%r9,1),%rsi
negq %r9
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
leaq 64+8(%rsp),%r14
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
jmp L$1st4x
.p2align 5
L$1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdi,(%r14)
movq %rdx,%r13
addq $32,%r15
jnz L$1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%r13
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%r14)
jmp L$outer4x
.p2align 5
L$outer4x:
leaq 16+128(%r14),%rdx
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r12),%xmm0
movdqa -112(%r12),%xmm1
movdqa -96(%r12),%xmm2
movdqa -80(%r12),%xmm3
pand -128(%rdx),%xmm0
pand -112(%rdx),%xmm1
por %xmm0,%xmm4
pand -96(%rdx),%xmm2
por %xmm1,%xmm5
pand -80(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r12),%xmm0
movdqa -48(%r12),%xmm1
movdqa -32(%r12),%xmm2
movdqa -16(%r12),%xmm3
pand -64(%rdx),%xmm0
pand -48(%rdx),%xmm1
por %xmm0,%xmm4
pand -32(%rdx),%xmm2
por %xmm1,%xmm5
pand -16(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r12),%xmm0
movdqa 16(%r12),%xmm1
movdqa 32(%r12),%xmm2
movdqa 48(%r12),%xmm3
pand 0(%rdx),%xmm0
pand 16(%rdx),%xmm1
por %xmm0,%xmm4
pand 32(%rdx),%xmm2
por %xmm1,%xmm5
pand 48(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r12),%xmm0
movdqa 80(%r12),%xmm1
movdqa 96(%r12),%xmm2
movdqa 112(%r12),%xmm3
pand 64(%rdx),%xmm0
pand 80(%rdx),%xmm1
por %xmm0,%xmm4
pand 96(%rdx),%xmm2
por %xmm1,%xmm5
pand 112(%rdx),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%r12),%r12
.byte 102,72,15,126,195
movq (%r14,%r9,1),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
movq %rdi,(%r14)
leaq (%r14,%r9,1),%r14
mulq %rbp
addq %rax,%r10
movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%r9),%r15
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %rdx,%r13
jmp L$inner4x
.p2align 5
L$inner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx),%rax
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq 0(%rcx),%rax
adcq $0,%rdx
addq (%r14),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-16(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 32(%rcx),%rcx
adcq $0,%rdx
movq %r13,-8(%r14)
movq %rdx,%r13
addq $32,%r15
jnz L$inner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx),%rax
adcq $0,%rdx
addq 16(%r14),%r10
leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq %rbp,%rax
movq -8(%rcx),%rbp
adcq $0,%rdx
addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %r13,-24(%r14)
movq %rdx,%r13
movq %rdi,-16(%r14)
leaq (%rcx,%r9,1),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%r14),%r13
adcq $0,%rdi
movq %r13,-8(%r14)
cmpq 16+8(%rsp),%r12
jb L$outer4x
xorq %rax,%rax
subq %r13,%rbp
adcq %r15,%r15
orq %r15,%rdi
subq %rdi,%rax
leaq (%r14,%r9,1),%rbx
movq (%rcx),%r12
leaq (%rcx),%rbp
movq %r9,%rcx
sarq $3+2,%rcx
movq 56+8(%rsp),%rdi
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqr4x_sub_entry
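// bn_power5_nohw: five Montgomery squarings (__bn_sqr8x_internal plus
// __bn_post4x_internal) followed by one Montgomery multiplication by a
// gathered table entry; the building block for fixed 2^5-window modular
// exponentiation. "nohw" marks the non-MULX/ADX code path.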
.globl _bn_power5_nohw
.private_extern _bn_power5_nohw
.p2align 5
_bn_power5_nohw:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$power5_prologue:
shll $3,%r9d
leal (%r9,%r9,2),%r10d
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$pwr_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$pwr_sp_done
.p2align 5
L$pwr_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$pwr_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwr_page_walk
jmp L$pwr_page_walk_done
L$pwr_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwr_page_walk
L$pwr_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$power5_body:
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
call __bn_sqr8x_internal
call __bn_post4x_internal
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq %rsi,%rdi
movq 40(%rsp),%rax
leaq 32(%rsp),%r8
call mul4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$power5_epilogue:
ret
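// bn_sqr8x_internal: schoolbook squaring of the off-diagonal products,
// a shift-and-add pass that doubles them and folds in the squared
// diagonal terms, then an 8-limb-per-iteration Montgomery reduction
// (__bn_sqr8x_reduction).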
.globl _bn_sqr8x_internal
.private_extern _bn_sqr8x_internal
.p2align 5
_bn_sqr8x_internal:
__bn_sqr8x_internal:
_CET_ENDBR
leaq 32(%r10),%rbp
leaq (%rsi,%r9,1),%rsi
movq %r9,%rcx
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
movq %r10,-24(%rdi,%rbp,1)
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
movq %r11,-16(%rdi,%rbp,1)
movq %rdx,%r10
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
movq %rax,%r12
movq %rbx,%rax
movq %rdx,%r13
leaq (%rbp),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
jmp L$sqr4x_1st
.p2align 5
L$sqr4x_1st:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq 16(%rsi,%rcx,1),%rbx
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %r10,8(%rdi,%rcx,1)
movq %rdx,%r12
adcq $0,%r12
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 24(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,16(%rdi,%rcx,1)
movq %rdx,%r13
adcq $0,%r13
leaq 32(%rcx),%rcx
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne L$sqr4x_1st
mulq %r15
addq %rax,%r13
leaq 16(%rbp),%rbp
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
jmp L$sqr4x_outer
.p2align 5
L$sqr4x_outer:
movq -32(%rsi,%rbp,1),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi,%rbp,1),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi,%rbp,1),%rbx
movq %rax,%r15
mulq %r14
movq -24(%rdi,%rbp,1),%r10
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
movq %r10,-24(%rdi,%rbp,1)
movq %rdx,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
adcq $0,%rdx
addq -16(%rdi,%rbp,1),%r11
movq %rdx,%r10
adcq $0,%r10
movq %r11,-16(%rdi,%rbp,1)
xorq %r12,%r12
movq -8(%rsi,%rbp,1),%rbx
mulq %r15
addq %rax,%r12
movq %rbx,%rax
adcq $0,%rdx
addq -8(%rdi,%rbp,1),%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rbp,1)
leaq (%rbp),%rcx
jmp L$sqr4x_inner
.p2align 5
L$sqr4x_inner:
movq (%rsi,%rcx,1),%rbx
mulq %r15
addq %rax,%r13
movq %rbx,%rax
movq %rdx,%r12
adcq $0,%r12
addq (%rdi,%rcx,1),%r13
adcq $0,%r12
.byte 0x67
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq 8(%rsi,%rcx,1),%rbx
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %r11,(%rdi,%rcx,1)
movq %rbx,%rax
movq %rdx,%r13
adcq $0,%r13
addq 8(%rdi,%rcx,1),%r12
leaq 16(%rcx),%rcx
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
adcq $0,%rdx
addq %r12,%r10
movq %rdx,%r11
adcq $0,%r11
movq %r10,-8(%rdi,%rcx,1)
cmpq $0,%rcx
jne L$sqr4x_inner
.byte 0x67
mulq %r15
addq %rax,%r13
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
addq $16,%rbp
jnz L$sqr4x_outer
movq -32(%rsi),%r14
leaq 48+8(%rsp,%r9,2),%rdi
movq -24(%rsi),%rax
leaq -32(%rdi,%rbp,1),%rdi
movq -16(%rsi),%rbx
movq %rax,%r15
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
mulq %r14
addq %rax,%r11
movq %rbx,%rax
movq %r10,-24(%rdi)
movq %rdx,%r10
adcq $0,%r10
addq %r13,%r11
movq -8(%rsi),%rbx
adcq $0,%r10
mulq %r15
addq %rax,%r12
movq %rbx,%rax
movq %r11,-16(%rdi)
movq %rdx,%r13
adcq $0,%r13
mulq %r14
addq %rax,%r10
movq %rbx,%rax
movq %rdx,%r11
adcq $0,%r11
addq %r12,%r10
adcq $0,%r11
movq %r10,-8(%rdi)
mulq %r15
addq %rax,%r13
movq -16(%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,(%rdi)
movq %rdx,%r12
movq %rdx,8(%rdi)
mulq %rbx
addq $16,%rbp
xorq %r14,%r14
subq %r9,%rbp
xorq %r15,%r15
addq %r12,%rax
adcq $0,%rdx
movq %rax,8(%rdi)
movq %rdx,16(%rdi)
movq %r15,24(%rdi)
movq -16(%rsi,%rbp,1),%rax
leaq 48+8(%rsp),%rdi
xorq %r10,%r10
movq 8(%rdi),%r11
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
leaq 16(%rbp),%rbp
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
jmp L$sqr4x_shift_n_add
.p2align 5
L$sqr4x_shift_n_add:
leaq (%r14,%r10,2),%r12
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi,%rbp,1),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 0(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 8(%rdi),%r11
adcq %rax,%rbx
movq 0(%rsi,%rbp,1),%rax
movq %rbx,-16(%rdi)
adcq %rdx,%r8
leaq (%r14,%r10,2),%r12
movq %r8,-8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq 16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 24(%rdi),%r11
adcq %rax,%r12
movq 8(%rsi,%rbp,1),%rax
movq %r12,0(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,8(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
movq 32(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq 40(%rdi),%r11
adcq %rax,%rbx
movq 16(%rsi,%rbp,1),%rax
movq %rbx,16(%rdi)
adcq %rdx,%r8
movq %r8,24(%rdi)
sbbq %r15,%r15
leaq 64(%rdi),%rdi
addq $32,%rbp
jnz L$sqr4x_shift_n_add
leaq (%r14,%r10,2),%r12
.byte 0x67
shrq $63,%r10
leaq (%rcx,%r11,2),%r13
shrq $63,%r11
orq %r10,%r13
movq -16(%rdi),%r10
movq %r11,%r14
mulq %rax
negq %r15
movq -8(%rdi),%r11
adcq %rax,%r12
movq -8(%rsi),%rax
movq %r12,-32(%rdi)
adcq %rdx,%r13
leaq (%r14,%r10,2),%rbx
movq %r13,-24(%rdi)
sbbq %r15,%r15
shrq $63,%r10
leaq (%rcx,%r11,2),%r8
shrq $63,%r11
orq %r10,%r8
mulq %rax
negq %r15
adcq %rax,%rbx
adcq %rdx,%r8
movq %rbx,-16(%rdi)
movq %r8,-8(%rdi)
.byte 102,72,15,126,213
__bn_sqr8x_reduction:
xorq %rax,%rax
leaq (%r9,%rbp,1),%rcx
leaq 48+8(%rsp,%r9,2),%rdx
movq %rcx,0+8(%rsp)
leaq 48+8(%rsp,%r9,1),%rdi
movq %rdx,8+8(%rsp)
negq %r9
jmp L$8x_reduction_loop
.p2align 5
L$8x_reduction_loop:
leaq (%rdi,%r9,1),%rdi
.byte 0x66
movq 0(%rdi),%rbx
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,(%rdx)
leaq 64(%rdi),%rdi
.byte 0x67
movq %rbx,%r8
imulq 32+8(%rsp),%rbx
movq 0(%rbp),%rax
movl $8,%ecx
jmp L$8x_reduce
.p2align 5
L$8x_reduce:
mulq %rbx
movq 8(%rbp),%rax
negq %r8
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
movq %rbx,48-8+8(%rsp,%rcx,8)
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq 32+8(%rsp),%rsi
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
imulq %r8,%rsi
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq %rsi,%rbx
addq %rax,%r15
movq 0(%rbp),%rax
adcq $0,%rdx
addq %r15,%r14
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz L$8x_reduce
leaq 64(%rbp),%rbp
xorq %rax,%rax
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae L$8x_no_tail
.byte 0x66
addq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movq 48+56+8(%rsp),%rbx
movl $8,%ecx
movq 0(%rbp),%rax
jmp L$8x_tail
.p2align 5
L$8x_tail:
mulq %rbx
addq %rax,%r8
movq 8(%rbp),%rax
movq %r8,(%rdi)
movq %rdx,%r8
adcq $0,%r8
mulq %rbx
addq %rax,%r9
movq 16(%rbp),%rax
adcq $0,%rdx
addq %r9,%r8
leaq 8(%rdi),%rdi
movq %rdx,%r9
adcq $0,%r9
mulq %rbx
addq %rax,%r10
movq 24(%rbp),%rax
adcq $0,%rdx
addq %r10,%r9
movq %rdx,%r10
adcq $0,%r10
mulq %rbx
addq %rax,%r11
movq 32(%rbp),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
mulq %rbx
addq %rax,%r12
movq 40(%rbp),%rax
adcq $0,%rdx
addq %r12,%r11
movq %rdx,%r12
adcq $0,%r12
mulq %rbx
addq %rax,%r13
movq 48(%rbp),%rax
adcq $0,%rdx
addq %r13,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %rbx
addq %rax,%r14
movq 56(%rbp),%rax
adcq $0,%rdx
addq %r14,%r13
movq %rdx,%r14
adcq $0,%r14
mulq %rbx
movq 48-16+8(%rsp,%rcx,8),%rbx
addq %rax,%r15
adcq $0,%rdx
addq %r15,%r14
movq 0(%rbp),%rax
movq %rdx,%r15
adcq $0,%r15
decl %ecx
jnz L$8x_tail
leaq 64(%rbp),%rbp
movq 8+8(%rsp),%rdx
cmpq 0+8(%rsp),%rbp
jae L$8x_tail_done
movq 48+56+8(%rsp),%rbx
negq %rsi
movq 0(%rbp),%rax
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
sbbq %rsi,%rsi
movl $8,%ecx
jmp L$8x_tail
.p2align 5
L$8x_tail_done:
xorq %rax,%rax
addq (%rdx),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
negq %rsi
L$8x_no_tail:
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq -8(%rbp),%rcx
xorq %rsi,%rsi
.byte 102,72,15,126,213
movq %r8,0(%rdi)
movq %r9,8(%rdi)
.byte 102,73,15,126,217
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi),%rdi
cmpq %rdx,%rdi
jb L$8x_reduction_loop
ret
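// __bn_post4x_internal: final conditional subtraction of the modulus,
// done branchlessly: the modulus words are complemented and masked with
// the borrow in %rax before being added back in.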
.p2align 5
__bn_post4x_internal:
movq 0(%rbp),%r12
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
.byte 102,72,15,126,207
negq %rax
.byte 102,72,15,126,206
sarq $3+2,%rcx
decq %r12
xorq %r10,%r10
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqr4x_sub_entry
.p2align 4
L$sqr4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
L$sqr4x_sub_entry:
leaq 32(%rbp),%rbp
notq %r12
notq %r13
notq %r14
notq %r15
andq %rax,%r12
andq %rax,%r13
andq %rax,%r14
andq %rax,%r15
negq %r10
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
adcq 16(%rbx),%r14
adcq 24(%rbx),%r15
movq %r12,0(%rdi)
leaq 32(%rbx),%rbx
movq %r13,8(%rdi)
sbbq %r10,%r10
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz L$sqr4x_sub
movq %r9,%r10
negq %r9
ret
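// bn_mulx4x_mont_gather5: the same operation as bn_mul4x_mont_gather5,
// implemented with the BMI2/ADX mulx/adcx/adox carry chains.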
.globl _bn_mulx4x_mont_gather5
.private_extern _bn_mulx4x_mont_gather5
.p2align 5
_bn_mulx4x_mont_gather5:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx4x_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$mulx4xsp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$mulx4xsp_done
L$mulx4xsp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$mulx4xsp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
jmp L$mulx4x_page_walk_done
L$mulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
L$mulx4x_page_walk_done:
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$mulx4x_body:
call mulx4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mulx4x_epilogue:
ret
.p2align 5
mulx4x_internal:
movq %r9,8(%rsp)
movq %r9,%r10
negq %r9
shlq $5,%r9
negq %r10
leaq 128(%rdx,%r9,1),%r13
shrq $5+5,%r9
movd 8(%rax),%xmm5
subq $1,%r9
leaq L$inc(%rip),%rax
movq %r13,16+8(%rsp)
movq %r9,24+8(%rsp)
movq %rdi,56+8(%rsp)
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 88-112(%rsp,%r10,1),%r10
leaq 128(%rdx),%rdi
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
.byte 0x67
movdqa %xmm1,%xmm2
.byte 0x67
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,112(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,128(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,144(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,160(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,176(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,192(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,208(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,224(%r10)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,240(%r10)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,256(%r10)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,272(%r10)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,288(%r10)
movdqa %xmm4,%xmm3
.byte 0x67
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,304(%r10)
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,320(%r10)
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,336(%r10)
pand 64(%rdi),%xmm0
pand 80(%rdi),%xmm1
pand 96(%rdi),%xmm2
movdqa %xmm3,352(%r10)
pand 112(%rdi),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -128(%rdi),%xmm4
movdqa -112(%rdi),%xmm5
movdqa -96(%rdi),%xmm2
pand 112(%r10),%xmm4
movdqa -80(%rdi),%xmm3
pand 128(%r10),%xmm5
por %xmm4,%xmm0
pand 144(%r10),%xmm2
por %xmm5,%xmm1
pand 160(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa -64(%rdi),%xmm4
movdqa -48(%rdi),%xmm5
movdqa -32(%rdi),%xmm2
pand 176(%r10),%xmm4
movdqa -16(%rdi),%xmm3
pand 192(%r10),%xmm5
por %xmm4,%xmm0
pand 208(%r10),%xmm2
por %xmm5,%xmm1
pand 224(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
movdqa 0(%rdi),%xmm4
movdqa 16(%rdi),%xmm5
movdqa 32(%rdi),%xmm2
pand 240(%r10),%xmm4
movdqa 48(%rdi),%xmm3
pand 256(%r10),%xmm5
por %xmm4,%xmm0
pand 272(%r10),%xmm2
por %xmm5,%xmm1
pand 288(%r10),%xmm3
por %xmm2,%xmm0
por %xmm3,%xmm1
pxor %xmm1,%xmm0
pshufd $0x4e,%xmm0,%xmm1
por %xmm1,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
leaq 64+32+8(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r12
addq %rax,%r11
mulxq 16(%rsi),%rax,%r13
adcq %rax,%r12
adcq $0,%r13
mulxq 24(%rsi),%rax,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
xorq %rbp,%rbp
movq %r8,%rdx
movq %rdi,8+8(%rsp)
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp L$mulx4x_1st
.p2align 5
L$mulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_1st
movq 8(%rsp),%rax
adcq %rbp,%r15
leaq (%rsi,%rax,1),%rsi
addq %r15,%r14
movq 8+8(%rsp),%rdi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
jmp L$mulx4x_outer
.p2align 5
L$mulx4x_outer:
leaq 16-256(%rbx),%r10
pxor %xmm4,%xmm4
.byte 0x67,0x67
pxor %xmm5,%xmm5
movdqa -128(%rdi),%xmm0
movdqa -112(%rdi),%xmm1
movdqa -96(%rdi),%xmm2
pand 256(%r10),%xmm0
movdqa -80(%rdi),%xmm3
pand 272(%r10),%xmm1
por %xmm0,%xmm4
pand 288(%r10),%xmm2
por %xmm1,%xmm5
pand 304(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%rdi),%xmm0
movdqa -48(%rdi),%xmm1
movdqa -32(%rdi),%xmm2
pand 320(%r10),%xmm0
movdqa -16(%rdi),%xmm3
pand 336(%r10),%xmm1
por %xmm0,%xmm4
pand 352(%r10),%xmm2
por %xmm1,%xmm5
pand 368(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%rdi),%xmm0
movdqa 16(%rdi),%xmm1
movdqa 32(%rdi),%xmm2
pand 384(%r10),%xmm0
movdqa 48(%rdi),%xmm3
pand 400(%r10),%xmm1
por %xmm0,%xmm4
pand 416(%r10),%xmm2
por %xmm1,%xmm5
pand 432(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%rdi),%xmm0
movdqa 80(%rdi),%xmm1
movdqa 96(%rdi),%xmm2
pand 448(%r10),%xmm0
movdqa 112(%rdi),%xmm3
pand 464(%r10),%xmm1
por %xmm0,%xmm4
pand 480(%r10),%xmm2
por %xmm1,%xmm5
pand 496(%r10),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
leaq 256(%rdi),%rdi
.byte 102,72,15,126,194
movq %rbp,(%rbx)
leaq 32(%rbx,%rax,1),%rbx
mulxq 0(%rsi),%r8,%r11
xorq %rbp,%rbp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
mulxq 24(%rsi),%rdx,%r14
adoxq -16(%rbx),%r12
adcxq %rdx,%r13
leaq (%rcx,%rax,1),%rcx
leaq 32(%rsi),%rsi
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
adoxq %rbp,%r14
movq %r8,%r15
imulq 32+8(%rsp),%r8
movq %r8,%rdx
xorq %rbp,%rbp
movq %rdi,8+8(%rsp)
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq 24+8(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r12
movq %r11,-24(%rbx)
adoxq %rbp,%r15
movq %r12,-16(%rbx)
leaq 32(%rcx),%rcx
jmp L$mulx4x_inner
.p2align 5
L$mulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
movq %r11,-32(%rbx)
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
leaq 32(%rcx),%rcx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_inner
movq 0+8(%rsp),%rax
adcq %rbp,%r15
subq 0(%rbx),%rdi
movq 8+8(%rsp),%rdi
movq 16+8(%rsp),%r10
adcq %r15,%r14
leaq (%rsi,%rax,1),%rsi
adcq %rbp,%rbp
movq %r14,-8(%rbx)
cmpq %r10,%rdi
jb L$mulx4x_outer
movq -8(%rcx),%r10
movq %rbp,%r8
movq (%rcx,%rax,1),%r12
leaq (%rcx,%rax,1),%rbp
movq %rax,%rcx
leaq (%rbx,%rax,1),%rdi
xorl %eax,%eax
xorq %r15,%r15
subq %r14,%r10
adcq %r15,%r15
orq %r15,%r8
sarq $3+2,%rcx
subq %r8,%rax
movq 56+8(%rsp),%rdx
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqrx4x_sub_entry
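// bn_powerx5: MULX/ADX counterpart of bn_power5_nohw (five squarings,
// then one multiplication by a gathered table entry).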
.globl _bn_powerx5
.private_extern _bn_powerx5
.p2align 5
_bn_powerx5:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$powerx5_prologue:
shll $3,%r9d
leaq (%r9,%r9,2),%r10
negq %r9
movq (%r8),%r8
leaq -320(%rsp,%r9,2),%r11
movq %rsp,%rbp
subq %rdi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$pwrx_sp_alt
subq %r11,%rbp
leaq -320(%rbp,%r9,2),%rbp
jmp L$pwrx_sp_done
.p2align 5
L$pwrx_sp_alt:
leaq 4096-320(,%r9,2),%r10
leaq -320(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$pwrx_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwrx_page_walk
jmp L$pwrx_page_walk_done
L$pwrx_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$pwrx_page_walk
L$pwrx_page_walk_done:
movq %r9,%r10
negq %r9
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,72,15,110,209
.byte 102,73,15,110,218
.byte 102,72,15,110,226
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$powerx5_body:
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
call __bn_sqrx8x_internal
call __bn_postx4x_internal
movq %r10,%r9
movq %rsi,%rdi
.byte 102,72,15,126,209
.byte 102,72,15,126,226
movq 40(%rsp),%rax
call mulx4x_internal
movq 40(%rsp),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$powerx5_epilogue:
ret
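// bn_sqrx8x_internal: MULX/ADX counterpart of bn_sqr8x_internal.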
.globl _bn_sqrx8x_internal
.private_extern _bn_sqrx8x_internal
.p2align 5
_bn_sqrx8x_internal:
__bn_sqrx8x_internal:
_CET_ENDBR
leaq 48+8(%rsp),%rdi
leaq (%rsi,%r9,1),%rbp
movq %r9,0+8(%rsp)
movq %rbp,8+8(%rsp)
jmp L$sqr8x_zero_start
.p2align 5
.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
L$sqrx8x_zero:
.byte 0x3e
movdqa %xmm0,0(%rdi)
movdqa %xmm0,16(%rdi)
movdqa %xmm0,32(%rdi)
movdqa %xmm0,48(%rdi)
L$sqr8x_zero_start:
movdqa %xmm0,64(%rdi)
movdqa %xmm0,80(%rdi)
movdqa %xmm0,96(%rdi)
movdqa %xmm0,112(%rdi)
leaq 128(%rdi),%rdi
subq $64,%r9
jnz L$sqrx8x_zero
movq 0(%rsi),%rdx
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
leaq 48+8(%rsp),%rdi
xorq %rbp,%rbp
jmp L$sqrx8x_outer_loop
.p2align 5
L$sqrx8x_outer_loop:
mulxq 8(%rsi),%r8,%rax
adcxq %r9,%r8
adoxq %rax,%r10
mulxq 16(%rsi),%r9,%rax
adcxq %r10,%r9
adoxq %rax,%r11
.byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00
adcxq %r11,%r10
adoxq %rax,%r12
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00
adcxq %r12,%r11
adoxq %rax,%r13
mulxq 40(%rsi),%r12,%rax
adcxq %r13,%r12
adoxq %rax,%r14
mulxq 48(%rsi),%r13,%rax
adcxq %r14,%r13
adoxq %r15,%rax
mulxq 56(%rsi),%r14,%r15
movq 8(%rsi),%rdx
adcxq %rax,%r14
adoxq %rbp,%r15
adcq 64(%rdi),%r15
movq %r8,8(%rdi)
movq %r9,16(%rdi)
sbbq %rcx,%rcx
xorq %rbp,%rbp
mulxq 16(%rsi),%r8,%rbx
mulxq 24(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 32(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %rbx,%r11
.byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00
adcxq %r13,%r11
adoxq %r14,%r12
.byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00
movq 16(%rsi),%rdx
adcxq %rax,%r12
adoxq %rbx,%r13
adcxq %r15,%r13
adoxq %rbp,%r14
adcxq %rbp,%r14
movq %r8,24(%rdi)
movq %r9,32(%rdi)
mulxq 24(%rsi),%r8,%rbx
mulxq 32(%rsi),%r9,%rax
adcxq %r10,%r8
adoxq %rbx,%r9
mulxq 40(%rsi),%r10,%rbx
adcxq %r11,%r9
adoxq %rax,%r10
.byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00
adcxq %r12,%r10
adoxq %r13,%r11
.byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00
.byte 0x3e
movq 24(%rsi),%rdx
adcxq %rbx,%r11
adoxq %rax,%r12
adcxq %r14,%r12
movq %r8,40(%rdi)
movq %r9,48(%rdi)
mulxq 32(%rsi),%r8,%rax
adoxq %rbp,%r13
adcxq %rbp,%r13
mulxq 40(%rsi),%r9,%rbx
adcxq %r10,%r8
adoxq %rax,%r9
mulxq 48(%rsi),%r10,%rax
adcxq %r11,%r9
adoxq %r12,%r10
mulxq 56(%rsi),%r11,%r12
movq 32(%rsi),%rdx
movq 40(%rsi),%r14
adcxq %rbx,%r10
adoxq %rax,%r11
movq 48(%rsi),%r15
adcxq %r13,%r11
adoxq %rbp,%r12
adcxq %rbp,%r12
movq %r8,56(%rdi)
movq %r9,64(%rdi)
mulxq %r14,%r9,%rax
movq 56(%rsi),%r8
adcxq %r10,%r9
mulxq %r15,%r10,%rbx
adoxq %rax,%r10
adcxq %r11,%r10
mulxq %r8,%r11,%rax
movq %r14,%rdx
adoxq %rbx,%r11
adcxq %r12,%r11
adcxq %rbp,%rax
mulxq %r15,%r14,%rbx
mulxq %r8,%r12,%r13
movq %r15,%rdx
leaq 64(%rsi),%rsi
adcxq %r14,%r11
adoxq %rbx,%r12
adcxq %rax,%r12
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %r8,%r8,%r14
adcxq %r8,%r13
adcxq %rbp,%r14
cmpq 8+8(%rsp),%rsi
je L$sqrx8x_outer_break
negq %rcx
movq $-8,%rcx
movq %rbp,%r15
movq 64(%rdi),%r8
adcxq 72(%rdi),%r9
adcxq 80(%rdi),%r10
adcxq 88(%rdi),%r11
adcq 96(%rdi),%r12
adcq 104(%rdi),%r13
adcq 112(%rdi),%r14
adcq 120(%rdi),%r15
leaq (%rsi),%rbp
leaq 128(%rdi),%rdi
sbbq %rax,%rax
movq -64(%rsi),%rdx
movq %rax,16+8(%rsp)
movq %rdi,24+8(%rsp)
xorl %eax,%eax
jmp L$sqrx8x_loop
.p2align 5
L$sqrx8x_loop:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
movq %rbx,(%rdi,%rcx,8)
movl $0,%ebx
adcxq %rax,%r13
adoxq %r15,%r14
.byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00
movq 8(%rsi,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rbx,%r15
adcxq %rbx,%r15
.byte 0x67
incq %rcx
jnz L$sqrx8x_loop
leaq 64(%rbp),%rbp
movq $-8,%rcx
cmpq 8+8(%rsp),%rbp
je L$sqrx8x_break
subq 16+8(%rsp),%rbx
.byte 0x66
movq -64(%rsi),%rdx
adcxq 0(%rdi),%r8
adcxq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
.byte 0x67
sbbq %rax,%rax
xorl %ebx,%ebx
movq %rax,16+8(%rsp)
jmp L$sqrx8x_loop
.p2align 5
L$sqrx8x_break:
xorq %rbp,%rbp
subq 16+8(%rsp),%rbx
adcxq %rbp,%r8
movq 24+8(%rsp),%rcx
adcxq %rbp,%r9
movq 0(%rsi),%rdx
adcq $0,%r10
movq %r8,0(%rdi)
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
cmpq %rcx,%rdi
je L$sqrx8x_outer_loop
movq %r9,8(%rdi)
movq 8(%rcx),%r9
movq %r10,16(%rdi)
movq 16(%rcx),%r10
movq %r11,24(%rdi)
movq 24(%rcx),%r11
movq %r12,32(%rdi)
movq 32(%rcx),%r12
movq %r13,40(%rdi)
movq 40(%rcx),%r13
movq %r14,48(%rdi)
movq 48(%rcx),%r14
movq %r15,56(%rdi)
movq 56(%rcx),%r15
movq %rcx,%rdi
jmp L$sqrx8x_outer_loop
.p2align 5
L$sqrx8x_outer_break:
movq %r9,72(%rdi)
.byte 102,72,15,126,217
movq %r10,80(%rdi)
movq %r11,88(%rdi)
movq %r12,96(%rdi)
movq %r13,104(%rdi)
movq %r14,112(%rdi)
leaq 48+8(%rsp),%rdi
movq (%rsi,%rcx,1),%rdx
movq 8(%rdi),%r11
xorq %r10,%r10
movq 0+8(%rsp),%r9
adoxq %r11,%r11
movq 16(%rdi),%r12
movq 24(%rdi),%r13
.p2align 5
L$sqrx4x_shift_n_add:
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
.byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00
.byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00
adoxq %r13,%r13
adcxq %r11,%rbx
movq 40(%rdi),%r11
movq %rax,0(%rdi)
movq %rbx,8(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
movq 16(%rsi,%rcx,1),%rdx
movq 48(%rdi),%r12
adoxq %r11,%r11
adcxq %r13,%rbx
movq 56(%rdi),%r13
movq %rax,16(%rdi)
movq %rbx,24(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r12,%r12
adcxq %r10,%rax
movq 24(%rsi,%rcx,1),%rdx
leaq 32(%rcx),%rcx
movq 64(%rdi),%r10
adoxq %r13,%r13
adcxq %r11,%rbx
movq 72(%rdi),%r11
movq %rax,32(%rdi)
movq %rbx,40(%rdi)
mulxq %rdx,%rax,%rbx
adoxq %r10,%r10
adcxq %r12,%rax
jrcxz L$sqrx4x_shift_n_add_break
.byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00
adoxq %r11,%r11
adcxq %r13,%rbx
movq 80(%rdi),%r12
movq 88(%rdi),%r13
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
nop
jmp L$sqrx4x_shift_n_add
.p2align 5
L$sqrx4x_shift_n_add_break:
adcxq %r13,%rbx
movq %rax,48(%rdi)
movq %rbx,56(%rdi)
leaq 64(%rdi),%rdi
.byte 102,72,15,126,213
__bn_sqrx8x_reduction:
xorl %eax,%eax
movq 32+8(%rsp),%rbx
movq 48+8(%rsp),%rdx
leaq -64(%rbp,%r9,1),%rcx
movq %rcx,0+8(%rsp)
movq %rdi,8+8(%rsp)
leaq 48+8(%rsp),%rdi
jmp L$sqrx8x_reduction_loop
.p2align 5
L$sqrx8x_reduction_loop:
movq 8(%rdi),%r9
movq 16(%rdi),%r10
movq 24(%rdi),%r11
movq 32(%rdi),%r12
movq %rdx,%r8
imulq %rbx,%rdx
movq 40(%rdi),%r13
movq 48(%rdi),%r14
movq 56(%rdi),%r15
movq %rax,24+8(%rsp)
leaq 64(%rdi),%rdi
xorq %rsi,%rsi
movq $-8,%rcx
jmp L$sqrx8x_reduce
.p2align 5
L$sqrx8x_reduce:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rbx,%rax
adoxq %r9,%r8
mulxq 8(%rbp),%rbx,%r9
adcxq %rbx,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rbx,%r10
adcxq %rbx,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rbx,%r11
adcxq %rbx,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00
movq %rdx,%rax
movq %r8,%rdx
adcxq %rbx,%r11
adoxq %r13,%r12
mulxq 32+8(%rsp),%rbx,%rdx
movq %rax,%rdx
movq %rax,64+48+8(%rsp,%rcx,8)
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq %rbx,%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
adcxq %rsi,%r15
.byte 0x67,0x67,0x67
incq %rcx
jnz L$sqrx8x_reduce
movq %rsi,%rax
cmpq 0+8(%rsp),%rbp
jae L$sqrx8x_no_tail
movq 48+8(%rsp),%rdx
addq 0(%rdi),%r8
leaq 64(%rbp),%rbp
movq $-8,%rcx
adcxq 8(%rdi),%r9
adcxq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp L$sqrx8x_tail
.p2align 5
L$sqrx8x_tail:
movq %r8,%rbx
mulxq 0(%rbp),%rax,%r8
adcxq %rax,%rbx
adoxq %r9,%r8
mulxq 8(%rbp),%rax,%r9
adcxq %rax,%r8
adoxq %r10,%r9
mulxq 16(%rbp),%rax,%r10
adcxq %rax,%r9
adoxq %r11,%r10
mulxq 24(%rbp),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 40(%rbp),%rax,%r13
adcxq %rax,%r12
adoxq %r14,%r13
mulxq 48(%rbp),%rax,%r14
adcxq %rax,%r13
adoxq %r15,%r14
mulxq 56(%rbp),%rax,%r15
movq 72+48+8(%rsp,%rcx,8),%rdx
adcxq %rax,%r14
adoxq %rsi,%r15
movq %rbx,(%rdi,%rcx,8)
movq %r8,%rbx
adcxq %rsi,%r15
incq %rcx
jnz L$sqrx8x_tail
cmpq 0+8(%rsp),%rbp
jae L$sqrx8x_tail_done
subq 16+8(%rsp),%rsi
movq 48+8(%rsp),%rdx
leaq 64(%rbp),%rbp
adcq 0(%rdi),%r8
adcq 8(%rdi),%r9
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
leaq 64(%rdi),%rdi
sbbq %rax,%rax
subq $8,%rcx
xorq %rsi,%rsi
movq %rax,16+8(%rsp)
jmp L$sqrx8x_tail
.p2align 5
L$sqrx8x_tail_done:
xorq %rax,%rax
addq 24+8(%rsp),%r8
adcq $0,%r9
adcq $0,%r10
adcq $0,%r11
adcq $0,%r12
adcq $0,%r13
adcq $0,%r14
adcq $0,%r15
adcq $0,%rax
subq 16+8(%rsp),%rsi
L$sqrx8x_no_tail:
adcq 0(%rdi),%r8
.byte 102,72,15,126,217
adcq 8(%rdi),%r9
movq 56(%rbp),%rsi
.byte 102,72,15,126,213
adcq 16(%rdi),%r10
adcq 24(%rdi),%r11
adcq 32(%rdi),%r12
adcq 40(%rdi),%r13
adcq 48(%rdi),%r14
adcq 56(%rdi),%r15
adcq $0,%rax
movq 32+8(%rsp),%rbx
movq 64(%rdi,%rcx,1),%rdx
movq %r8,0(%rdi)
leaq 64(%rdi),%r8
movq %r9,8(%rdi)
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq %r12,32(%rdi)
movq %r13,40(%rdi)
movq %r14,48(%rdi)
movq %r15,56(%rdi)
leaq 64(%rdi,%rcx,1),%rdi
cmpq 8+8(%rsp),%r8
jb L$sqrx8x_reduction_loop
ret
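// __bn_postx4x_internal: as __bn_post4x_internal, with the complement-
// and-mask step fused into a single andn per word.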
.p2align 5
__bn_postx4x_internal:
movq 0(%rbp),%r12
movq %rcx,%r10
movq %rcx,%r9
negq %rax
sarq $3+2,%rcx
.byte 102,72,15,126,202
.byte 102,72,15,126,206
decq %r12
movq 8(%rbp),%r13
xorq %r8,%r8
movq 16(%rbp),%r14
movq 24(%rbp),%r15
jmp L$sqrx4x_sub_entry
.p2align 4
L$sqrx4x_sub:
movq 0(%rbp),%r12
movq 8(%rbp),%r13
movq 16(%rbp),%r14
movq 24(%rbp),%r15
L$sqrx4x_sub_entry:
andnq %rax,%r12,%r12
leaq 32(%rbp),%rbp
andnq %rax,%r13,%r13
andnq %rax,%r14,%r14
andnq %rax,%r15,%r15
negq %r8
adcq 0(%rdi),%r12
adcq 8(%rdi),%r13
adcq 16(%rdi),%r14
adcq 24(%rdi),%r15
movq %r12,0(%rdx)
leaq 32(%rdi),%rdi
movq %r13,8(%rdx)
sbbq %r8,%r8
movq %r14,16(%rdx)
movq %r15,24(%rdx)
leaq 32(%rdx),%rdx
incq %rcx
jnz L$sqrx4x_sub
negq %r9
ret
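// bn_scatter5: stores an n-word value into a power table with a 256-byte
// stride between words, matching the layout bn_gather5 reads back.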
.globl _bn_scatter5
.private_extern _bn_scatter5
.p2align 4
_bn_scatter5:
_CET_ENDBR
cmpl $0,%esi
jz L$scatter_epilogue
leaq (%rdx,%rcx,8),%rdx
L$scatter:
movq (%rdi),%rax
leaq 8(%rdi),%rdi
movq %rax,(%rdx)
leaq 256(%rdx),%rdx
subl $1,%esi
jnz L$scatter
L$scatter_epilogue:
ret
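// bn_gather5: constant-time table read. pcmpeqd turns the requested index
// into per-entry selection masks; every one of the 32 table entries is
// then ANDed with its mask and ORed into the result, so the memory access
// pattern is independent of the index.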
.globl _bn_gather5
.private_extern _bn_gather5
.p2align 5
_bn_gather5:
L$SEH_begin_bn_gather5:
_CET_ENDBR
.byte 0x4c,0x8d,0x14,0x24
.byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
leaq L$inc(%rip),%rax
andq $-16,%rsp
movd %ecx,%xmm5
movdqa 0(%rax),%xmm0
movdqa 16(%rax),%xmm1
leaq 128(%rdx),%r11
leaq 128(%rsp),%rax
pshufd $0,%xmm5,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-128(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-112(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-96(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-80(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,-64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,-48(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,-32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,-16(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,0(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,16(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,32(%rax)
movdqa %xmm4,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm5,%xmm0
movdqa %xmm3,48(%rax)
movdqa %xmm4,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm5,%xmm1
movdqa %xmm0,64(%rax)
movdqa %xmm4,%xmm0
paddd %xmm2,%xmm3
pcmpeqd %xmm5,%xmm2
movdqa %xmm1,80(%rax)
movdqa %xmm4,%xmm1
paddd %xmm3,%xmm0
pcmpeqd %xmm5,%xmm3
movdqa %xmm2,96(%rax)
movdqa %xmm4,%xmm2
movdqa %xmm3,112(%rax)
jmp L$gather
.p2align 5
L$gather:
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa -128(%r11),%xmm0
movdqa -112(%r11),%xmm1
movdqa -96(%r11),%xmm2
pand -128(%rax),%xmm0
movdqa -80(%r11),%xmm3
pand -112(%rax),%xmm1
por %xmm0,%xmm4
pand -96(%rax),%xmm2
por %xmm1,%xmm5
pand -80(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa -64(%r11),%xmm0
movdqa -48(%r11),%xmm1
movdqa -32(%r11),%xmm2
pand -64(%rax),%xmm0
movdqa -16(%r11),%xmm3
pand -48(%rax),%xmm1
por %xmm0,%xmm4
pand -32(%rax),%xmm2
por %xmm1,%xmm5
pand -16(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 0(%r11),%xmm0
movdqa 16(%r11),%xmm1
movdqa 32(%r11),%xmm2
pand 0(%rax),%xmm0
movdqa 48(%r11),%xmm3
pand 16(%rax),%xmm1
por %xmm0,%xmm4
pand 32(%rax),%xmm2
por %xmm1,%xmm5
pand 48(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqa 64(%r11),%xmm0
movdqa 80(%r11),%xmm1
movdqa 96(%r11),%xmm2
pand 64(%rax),%xmm0
movdqa 112(%r11),%xmm3
pand 80(%rax),%xmm1
por %xmm0,%xmm4
pand 96(%rax),%xmm2
por %xmm1,%xmm5
pand 112(%rax),%xmm3
por %xmm2,%xmm4
por %xmm3,%xmm5
por %xmm5,%xmm4
leaq 256(%r11),%r11
pshufd $0x4e,%xmm4,%xmm0
por %xmm4,%xmm0
movq %xmm0,(%rdi)
leaq 8(%rdi),%rdi
subl $1,%esi
jnz L$gather
leaq (%r10),%rsp
ret
L$SEH_end_bn_gather5:
.section __DATA,__const
.p2align 6
L$inc:
.long 0,0, 1,1
.long 2,2, 2,2
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
#endif
| mktmansour/MKT-KSA-Geolocation-Security | 25,166 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/vpaes-armv8-ios64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.section __TEXT,__const
.align 7 // totally strategic alignment
_vpaes_consts:
Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Key schedule constants
//
Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.align 6
.text
##
## _vpaes_encrypt_preheat
##
## Fills x10 -> Lk_inv (via adrp/add, so the code stays position-independent)
## and v17-v27 with the constant tables listed below.
##
.align 4
_vpaes_encrypt_preheat:
adrp x10, Lk_inv@PAGE
add x10, x10, Lk_inv@PAGEOFF
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2
ret
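//
// A minimal sketch, not part of the generated file, of the nibble-split
// lookup these tables enable: each byte is split into its low and high
// nibble, each nibble indexes a 16-entry table via tbl, and the two
// half-lookups are recombined. (Register choices are illustrative only.)
//
//     and  v1.16b, v7.16b, v17.16b    // low nibbles (v17 = 0x0f mask)
//     ushr v0.16b, v7.16b, #4         // high nibbles
//     tbl  v1.16b, {v20.16b}, v1.16b  // lo-nibble table lookup (iptlo)
//     tbl  v2.16b, {v21.16b}, v0.16b  // hi-nibble table lookup (ipthi)
//     eor  v0.16b, v1.16b, v2.16b     // combine the two half-lookups
//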
##
## _vpaes_encrypt_core
##
## AES-encrypt the block in v7.
##
## Inputs:
##   v7      = input block
##   v16-v27 as set up by _vpaes_encrypt_preheat
##   x2      = scheduled keys (round count at [x2,#240])
##
## Output in v0
## Clobbers v1-v5, v16, w8, x9-x11
## Preserves v6 and v8-v15, so callers keep some local vectors
##
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward@PAGE+16
add x11, x11, Lk_mc_forward@PAGEOFF+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b Lenc_entry
.align 4
Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
Lenc_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
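##
##  _vpaes_encrypt_2x
##
##  Two-block variant of _vpaes_encrypt_core, as read from the code
##  below: the inputs arrive in v14 and v15, one shared walk over
##  the round keys drives both blocks, and v8-v13 shadow v0-v5 for
##  the second block. Results are returned in v0 and v1.
##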
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward@PAGE+16
add x11, x11, Lk_mc_forward@PAGEOFF+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b Lenc_2x_entry
.align 4
Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
########################################################
## ##
## AES key schedule ##
## ##
########################################################
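##
##  _vpaes_key_preheat
##
##  Key-schedule analogue of _vpaes_encrypt_preheat. Per the loads
##  below: v16/v17 get the 0x5b (Lk_s63) and 0x0f masks, v18-v23
##  get Lk_inv, Lk_ipt and Lk_sb1, v24-v31 the Lk_dksd/Lk_dksb/
##  Lk_dkse/Lk_dks9 tables, v8 Lk_rcon and v9 Lk_mc_forward[0].
##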
.align 4
_vpaes_key_preheat:
adrp x10, Lk_inv@PAGE
add x10, x10, Lk_inv@PAGEOFF
movi v16.16b, #0x5b // Lk_s63
adrp x11, Lk_sb1@PAGE
add x11, x11, Lk_sb1@PAGEOFF
movi v17.16b, #0x0f // Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt
adrp x10, Lk_dksd@PAGE
add x10, x10, Lk_dksd@PAGEOFF
ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1
adrp x11, Lk_mc_forward@PAGE
add x11, x11, Lk_mc_forward@PAGEOFF
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9
ld1 {v8.2d}, [x10] // Lk_rcon
ld1 {v9.2d}, [x11] // Lk_mc_forward[0]
ret
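##
##  _vpaes_schedule_core
##
##  Key-schedule driver. Judging from the body: x0 points at the
##  user key, w1 holds the key size in bits (dispatched on 128/192/
##  256 via the compare against 192), x2 at the output schedule,
##  and x8 carries the round index mod 4 as an offset into Lk_sr.
##  All paths finish through Lschedule_mangle_last.
##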
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, Lk_sr@PAGE // lea Lk_sr(%rip),%r10
add x10, x10, Lk_sr@PAGEOFF
add x8, x8, x10
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
cmp w1, #192 // cmp $192, %esi
b.hi Lschedule_256
b.eq Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
Lschedule_128:
mov x0, #10 // mov $10, %esi
Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
.align 4
Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align 4
Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, Lk_deskew@PAGE // lea Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, Lk_deskew@PAGEOFF
cbnz w3, Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, Lk_opt@PAGE // lea Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, Lk_opt@PAGEOFF
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8 (+48 == -16 mod 64)
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
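##
##  _vpaes_set_encrypt_key
##
##  Key expansion entry point. Inferred from the body: x0 = user
##  key bytes, w1 = key length in bits, x2 = output AES_KEY; the
##  round count (bits/32 + 5) is stored at [x2,#240] before
##  _vpaes_schedule_core builds the schedule. Returns 0 in x0.
##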
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.align 4
_vpaes_set_encrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // add $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
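##
##  _vpaes_ctr32_encrypt_blocks
##
##  CTR mode with a 32-bit big-endian counter in the last word of
##  the IV. Per the body: x0 = input, x1 = output, x2 = length in
##  64-byte blocks (not bytes), x3 = AES_KEY, x4 = 16-byte
##  IV/counter block. An odd leading block goes through
##  _vpaes_encrypt_core; the rest is processed two blocks at a
##  time by _vpaes_encrypt_2x.
##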
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.align 4
_vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
b.ls Lctr32_done
Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi Lctr32_loop
Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
mktmansour/MKT-KSA-Geolocation-Security
| 40,421
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha-armv8-linux64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.section .rodata
.align 5
.Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral "expand 32-byte k"
.Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 // "ChaCha20 for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align 2
.text
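// ChaCha20_ctr32_nohw -- argument mapping inferred from the loads
// and stores below: x0 = output, x1 = input, x2 = length in bytes,
// x3 = 256-bit key, x4 = 128-bit counter/nonce block. This scalar
// path keeps the whole 4x4 state in general-purpose registers,
// two 32-bit lanes packed per 64-bit register.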
.globl ChaCha20_ctr32_nohw
.hidden ChaCha20_ctr32_nohw
.type ChaCha20_ctr32_nohw,%function
.align 5
ChaCha20_ctr32_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
.Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
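// Ten passes of this loop give the 20 ChaCha rounds: each pass is
// one column round followed by one diagonal round. The spec's
// left-rotates by 16/12/8/7 appear here as ror #16/#20/#24/#25,
// since a right-rotate by 32-n equals a left-rotate by n.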
.Loop:
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,.Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo .Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi .Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
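// Tail: the keystream for a final partial block is staged on the
// stack, XORed into the output byte by byte in .Loop_tail, and the
// scratch area is wiped with zeros afterwards.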
.align 4
.Ltail:
add x2,x2,#64
.Less_than_64:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
.Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,.Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw
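// ChaCha20_ctr32_neon: same arguments as the scalar routine. Each
// outer iteration yields 256 bytes -- one block kept in general-
// purpose registers interleaved with three NEON blocks (v0-v19) --
// hiding latency by alternating scalar and vector instructions.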
.globl ChaCha20_ctr32_neon
.hidden ChaCha20_ctr32_neon
.type ChaCha20_ctr32_neon,%function
.align 5
ChaCha20_ctr32_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs .L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
.Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
.Loop_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,.Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo .Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi .Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo .Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq .Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo .Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq .Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo .Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq .Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b .Last_neon
.Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b .Last_neon
.Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b .Last_neon
.align 4
.Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
.Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,.Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
.Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon
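// ChaCha20_512_neon: reached via .L512_or_more_neon for inputs of
// at least 512 bytes. Each outer pass produces 512 bytes: six NEON
// block states (v0-v23) plus two scalar blocks -- the scalar lanes
// run at twice the vector round rate, finishing one block per half
// (.Loop_upper_neon / .Loop_lower_neon) -- with the invariant key
// material spilled to the stack since all 32 SIMD registers are
// in use.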
.type ChaCha20_512_neon,%function
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,.Lsigma
add x5,x5,:lo12:.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
.L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
.Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
.Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,.Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
.Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,.Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs .Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq .Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs .Loop_outer_neon
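// Reference note: the eor instructions below appear to scrub key-derived
// data from v25-v30 (matching the "wipe off-load area" stores above)
// before control transfers to the scalar .Loop_outer path.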
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b .Loop_outer
.Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.size ChaCha20_512_neon,.-ChaCha20_512_neon
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/x86_64-mont-macosx.S | 18,647 bytes
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
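// A minimal C sketch of the word-serial (CIOS) Montgomery multiplication
// implemented by the routines below -- reference only, not part of the
// generated output; names are illustrative. It computes
// rp = ap*bp*R^-1 mod np with R = 2^(64*num), where n0 = -np[0]^-1 mod 2^64
// is precomputed by the caller (the value loaded from (%r8)):
//
//   uint64_t tp[num + 1];                     // scratch, zero-initialized
//   for (size_t i = 0; i < num; i++) {
//       uint64_t c = 0, m;
//       for (size_t j = 0; j < num; j++) {    // tp += ap * bp[i]
//           unsigned __int128 w = (unsigned __int128)ap[j]*bp[i] + tp[j] + c;
//           tp[j] = (uint64_t)w; c = (uint64_t)(w >> 64);
//       }
//       tp[num] = c;
//       m = tp[0] * n0;                       // makes tp divisible by 2^64
//       c = 0;
//       for (size_t j = 0; j < num; j++) {    // tp = (tp + m*np) >> 64
//           unsigned __int128 w = (unsigned __int128)m*np[j] + tp[j] + c;
//           if (j) tp[j-1] = (uint64_t)w;     // low word at j==0 is zero
//           c = (uint64_t)(w >> 64);
//       }
//       tp[num-1] = tp[num] + c;              // top carry elided for brevity
//   }
//   // then, in constant time: rp = (tp >= np) ? tp - np : tp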
.globl _bn_mul_mont_nohw
.private_extern _bn_mul_mont_nohw
.p2align 4
_bn_mul_mont_nohw:
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
negq %r9
movq %rsp,%r11
leaq -16(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
jmp L$mul_page_walk_done
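// The loop below "page-walks" the newly reserved stack, touching it one
// page at a time so the guard page is hit in order and a large allocation
// cannot jump past it.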
.p2align 4
L$mul_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul_page_walk
L$mul_page_walk_done:
movq %rax,8(%rsp,%r9,8)
L$mul_body:
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$1st_enter
.p2align 4
L$1st:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r13
movq %r10,%r11
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$1st_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 1(%r15),%r15
movq %rdx,%r10
mulq %rbp
cmpq %r9,%r15
jne L$1st
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
movq %r10,%r11
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
jmp L$outer
.p2align 4
L$outer:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq %r8,%rbp
movq (%rsp),%r10
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq 8(%rsp),%r10
movq %rdx,%r13
leaq 1(%r15),%r15
jmp L$inner_enter
.p2align 4
L$inner:
addq %rax,%r13
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
L$inner_enter:
mulq %rbx
addq %rax,%r11
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq %r11,%r10
movq %rdx,%r11
adcq $0,%r11
leaq 1(%r15),%r15
mulq %rbp
cmpq %r9,%r15
jne L$inner
addq %rax,%r13
movq (%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
movq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %r13,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdx,%rdx
addq %r11,%r13
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r9,8)
movq %rdx,(%rsp,%r9,8)
leaq 1(%r14),%r14
cmpq %r9,%r14
jb L$outer
xorq %r14,%r14
movq (%rsp),%rax
movq %r9,%r15
.p2align 4
L$sub: sbbq (%rcx,%r14,8),%rax
movq %rax,(%rdi,%r14,8)
movq 8(%rsp,%r14,8),%rax
leaq 1(%r14),%r14
decq %r15
jnz L$sub
sbbq $0,%rax
movq $-1,%rbx
xorq %rax,%rbx
xorq %r14,%r14
movq %r9,%r15
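// Constant-time select: after L$sub, %rax is all-ones iff the subtraction
// borrowed (the unreduced value in tp must be kept) and zero otherwise;
// %rbx holds the complementary mask. The loop below therefore picks the
// correct candidate with AND/OR rather than a secret-dependent branch,
// overwriting the stack scratch area as it goes.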
L$copy:
movq (%rdi,%r14,8),%rcx
movq (%rsp,%r14,8),%rdx
andq %rbx,%rcx
andq %rax,%rdx
movq %r9,(%rsp,%r14,8)
orq %rcx,%rdx
movq %rdx,(%rdi,%r14,8)
leaq 1(%r14),%r14
subq $1,%r15
jnz L$copy
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul_epilogue:
ret
.globl _bn_mul4x_mont
.private_extern _bn_mul4x_mont
.p2align 4
_bn_mul4x_mont:
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
negq %r9
movq %rsp,%r11
leaq -32(%rsp,%r9,8),%r10
negq %r9
andq $-1024,%r10
subq %r10,%r11
andq $-4096,%r11
leaq (%r10,%r11,1),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul4x_page_walk
jmp L$mul4x_page_walk_done
L$mul4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r11
cmpq %r10,%rsp
ja L$mul4x_page_walk
L$mul4x_page_walk_done:
movq %rax,8(%rsp,%r9,8)
L$mul4x_body:
movq %rdi,16(%rsp,%r9,8)
movq %rdx,%r12
movq (%r8),%r8
movq (%r12),%rbx
movq (%rsi),%rax
xorq %r14,%r14
xorq %r15,%r15
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp L$1st4x
.p2align 4
L$1st4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb L$1st4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
leaq 1(%r14),%r14
.p2align 2
L$outer4x:
movq (%r12,%r14,8),%rbx
xorq %r15,%r15
movq (%rsp),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
imulq %r10,%rbp
movq %rdx,%r11
mulq %rbp
addq %rax,%r10
movq 8(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx),%rax
adcq $0,%rdx
addq 8(%rsp),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq 16(%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
leaq 4(%r15),%r15
adcq $0,%rdx
movq %rdi,(%rsp)
movq %rdx,%r13
jmp L$inner4x
.p2align 4
L$inner4x:
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
movq (%rcx,%r15,8),%rax
adcq $0,%rdx
addq (%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq 8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-8(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq 8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq 8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq -16(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
jb L$inner4x
mulq %rbx
addq %rax,%r10
movq -16(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -16(%rsp,%r15,8),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
movq -8(%rsi,%r15,8),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
movq %r13,-24(%rsp,%r15,8)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
movq -8(%rcx,%r15,8),%rax
adcq $0,%rdx
addq -8(%rsp,%r15,8),%r11
adcq $0,%rdx
leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
movq (%rsi),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
movq %rdi,-16(%rsp,%r15,8)
movq %rdx,%r13
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
addq (%rsp,%r9,8),%r13
adcq $0,%rdi
movq %r13,-8(%rsp,%r15,8)
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
jb L$outer4x
movq 16(%rsp,%r9,8),%rdi
leaq -4(%r9),%r15
movq 0(%rsp),%rax
movq 8(%rsp),%rdx
shrq $2,%r15
leaq (%rsp),%rsi
xorq %r14,%r14
subq 0(%rcx),%rax
movq 16(%rsi),%rbx
movq 24(%rsi),%rbp
sbbq 8(%rcx),%rdx
L$sub4x:
movq %rax,0(%rdi,%r14,8)
movq %rdx,8(%rdi,%r14,8)
sbbq 16(%rcx,%r14,8),%rbx
movq 32(%rsi,%r14,8),%rax
movq 40(%rsi,%r14,8),%rdx
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
movq %rbp,24(%rdi,%r14,8)
sbbq 32(%rcx,%r14,8),%rax
movq 48(%rsi,%r14,8),%rbx
movq 56(%rsi,%r14,8),%rbp
sbbq 40(%rcx,%r14,8),%rdx
leaq 4(%r14),%r14
decq %r15
jnz L$sub4x
movq %rax,0(%rdi,%r14,8)
movq 32(%rsi,%r14,8),%rax
sbbq 16(%rcx,%r14,8),%rbx
movq %rdx,8(%rdi,%r14,8)
sbbq 24(%rcx,%r14,8),%rbp
movq %rbx,16(%rdi,%r14,8)
sbbq $0,%rax
movq %rbp,24(%rdi,%r14,8)
pxor %xmm0,%xmm0
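// The .byte sequence below encodes movq %rax,%xmm4 (a GPR-to-XMM move),
// emitted as raw bytes for compatibility with older assemblers.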
.byte 102,72,15,110,224
pcmpeqd %xmm5,%xmm5
pshufd $0,%xmm4,%xmm4
movq %r9,%r15
pxor %xmm4,%xmm5
shrq $2,%r15
xorl %eax,%eax
jmp L$copy4x
.p2align 4
L$copy4x:
movdqa (%rsp,%rax,1),%xmm1
movdqu (%rdi,%rax,1),%xmm2
pand %xmm4,%xmm1
pand %xmm5,%xmm2
movdqa 16(%rsp,%rax,1),%xmm3
movdqa %xmm0,(%rsp,%rax,1)
por %xmm2,%xmm1
movdqu 16(%rdi,%rax,1),%xmm2
movdqu %xmm1,(%rdi,%rax,1)
pand %xmm4,%xmm3
pand %xmm5,%xmm2
movdqa %xmm0,16(%rsp,%rax,1)
por %xmm2,%xmm3
movdqu %xmm3,16(%rdi,%rax,1)
leaq 32(%rax),%rax
decq %r15
jnz L$copy4x
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mul4x_epilogue:
ret
.globl _bn_sqr8x_mont
.private_extern _bn_sqr8x_mont
.p2align 5
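// Dedicated squaring path: for a*a the off-diagonal partial products are
// symmetric and need only be computed once and doubled, roughly halving
// the single-word multiplies before the usual Montgomery reduction.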
_bn_sqr8x_mont:
_CET_ENDBR
movl %r9d,%r9d
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$sqr8x_prologue:
movl %r9d,%r10d
shll $3,%r9d
shlq $3+2,%r10
negq %r9
leaq -64(%rsp,%r9,2),%r11
movq %rsp,%rbp
movq (%r8),%r8
subq %rsi,%r11
andq $4095,%r11
cmpq %r11,%r10
jb L$sqr8x_sp_alt
subq %r11,%rbp
leaq -64(%rbp,%r9,2),%rbp
jmp L$sqr8x_sp_done
.p2align 5
L$sqr8x_sp_alt:
leaq 4096-64(,%r9,2),%r10
leaq -64(%rbp,%r9,2),%rbp
subq %r10,%r11
movq $0,%r10
cmovcq %r10,%r11
subq %r11,%rbp
L$sqr8x_sp_done:
andq $-64,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$sqr8x_page_walk
jmp L$sqr8x_page_walk_done
.p2align 4
L$sqr8x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$sqr8x_page_walk
L$sqr8x_page_walk_done:
movq %r9,%r10
negq %r9
movq %r8,32(%rsp)
movq %rax,40(%rsp)
L$sqr8x_body:
.byte 102,72,15,110,209
pxor %xmm0,%xmm0
.byte 102,72,15,110,207
.byte 102,73,15,110,218
testq %rdx,%rdx
jz L$sqr8x_nox
call _bn_sqrx8x_internal
leaq (%r8,%rcx,1),%rbx
movq %rcx,%r9
movq %rcx,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp L$sqr8x_sub
.p2align 5
L$sqr8x_nox:
call _bn_sqr8x_internal
leaq (%rdi,%r9,1),%rbx
movq %r9,%rcx
movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp L$sqr8x_sub
.p2align 5
L$sqr8x_sub:
movq 0(%rbx),%r12
movq 8(%rbx),%r13
movq 16(%rbx),%r14
movq 24(%rbx),%r15
leaq 32(%rbx),%rbx
sbbq 0(%rbp),%r12
sbbq 8(%rbp),%r13
sbbq 16(%rbp),%r14
sbbq 24(%rbp),%r15
leaq 32(%rbp),%rbp
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz L$sqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp L$sqr8x_cond_copy
.p2align 5
L$sqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz L$sqr8x_cond_copy
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$sqr8x_epilogue:
ret
.globl _bn_mulx4x_mont
.private_extern _bn_mulx4x_mont
.p2align 5
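// This variant requires BMI2 (MULX) and ADX (ADCX/ADOX): MULX multiplies
// without touching flags, while ADCX and ADOX maintain two independent
// carry chains in CF and OF, so the multiplication and reduction columns
// can be interleaved without serializing on a single carry flag.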
_bn_mulx4x_mont:
_CET_ENDBR
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
L$mulx4x_prologue:
shll $3,%r9d
xorq %r10,%r10
subq %r9,%r10
movq (%r8),%r8
leaq -72(%rsp,%r10,1),%rbp
andq $-128,%rbp
movq %rsp,%r11
subq %rbp,%r11
andq $-4096,%r11
leaq (%r11,%rbp,1),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
jmp L$mulx4x_page_walk_done
.p2align 4
L$mulx4x_page_walk:
leaq -4096(%rsp),%rsp
movq (%rsp),%r10
cmpq %rbp,%rsp
ja L$mulx4x_page_walk
L$mulx4x_page_walk_done:
leaq (%rdx,%r9,1),%r10
movq %r9,0(%rsp)
shrq $5,%r9
movq %r10,16(%rsp)
subq $1,%r9
movq %r8,24(%rsp)
movq %rdi,32(%rsp)
movq %rax,40(%rsp)
movq %r9,48(%rsp)
jmp L$mulx4x_body
.p2align 5
L$mulx4x_body:
leaq 8(%rdx),%rdi
movq (%rdx),%rdx
leaq 64+32(%rsp),%rbx
movq %rdx,%r9
mulxq 0(%rsi),%r8,%rax
mulxq 8(%rsi),%r11,%r14
addq %rax,%r11
movq %rdi,8(%rsp)
mulxq 16(%rsi),%r12,%r13
adcq %r14,%r12
adcq $0,%r13
movq %r8,%rdi
imulq 24(%rsp),%r8
xorq %rbp,%rbp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
leaq 32(%rsi),%rsi
adcxq %rax,%r13
adcxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%rdi
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00
movq 48(%rsp),%rdi
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
adcxq %rax,%r12
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r12,-16(%rbx)
jmp L$mulx4x_1st
.p2align 5
L$mulx4x_1st:
adcxq %rbp,%r15
mulxq 0(%rsi),%r10,%rax
adcxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
.byte 0x67,0x67
movq %r8,%rdx
adcxq %rax,%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
movq %r11,-32(%rbx)
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_1st
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
addq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
jmp L$mulx4x_outer
.p2align 5
L$mulx4x_outer:
movq (%rdi),%rdx
leaq 8(%rdi),%rdi
subq %rax,%rsi
movq %r15,(%rbx)
leaq 64+32(%rsp),%rbx
subq %rax,%rcx
mulxq 0(%rsi),%r8,%r11
xorl %ebp,%ebp
movq %rdx,%r9
mulxq 8(%rsi),%r14,%r12
adoxq -32(%rbx),%r8
adcxq %r14,%r11
mulxq 16(%rsi),%r15,%r13
adoxq -24(%rbx),%r11
adcxq %r15,%r12
adoxq -16(%rbx),%r12
adcxq %rbp,%r13
adoxq %rbp,%r13
movq %rdi,8(%rsp)
movq %r8,%r15
imulq 24(%rsp),%r8
xorl %ebp,%ebp
mulxq 24(%rsi),%rax,%r14
movq %r8,%rdx
adcxq %rax,%r13
adoxq -8(%rbx),%r13
adcxq %rbp,%r14
leaq 32(%rsi),%rsi
adoxq %rbp,%r14
mulxq 0(%rcx),%rax,%r10
adcxq %rax,%r15
adoxq %r11,%r10
mulxq 8(%rcx),%rax,%r11
adcxq %rax,%r10
adoxq %r12,%r11
mulxq 16(%rcx),%rax,%r12
movq %r10,-32(%rbx)
adcxq %rax,%r11
adoxq %r13,%r12
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-24(%rbx)
leaq 32(%rcx),%rcx
adcxq %rax,%r12
adoxq %rbp,%r15
movq 48(%rsp),%rdi
movq %r12,-16(%rbx)
jmp L$mulx4x_inner
.p2align 5
L$mulx4x_inner:
mulxq 0(%rsi),%r10,%rax
adcxq %rbp,%r15
adoxq %r14,%r10
mulxq 8(%rsi),%r11,%r14
adcxq 0(%rbx),%r10
adoxq %rax,%r11
mulxq 16(%rsi),%r12,%rax
adcxq 8(%rbx),%r11
adoxq %r14,%r12
mulxq 24(%rsi),%r13,%r14
movq %r8,%rdx
adcxq 16(%rbx),%r12
adoxq %rax,%r13
adcxq 24(%rbx),%r13
adoxq %rbp,%r14
leaq 32(%rsi),%rsi
leaq 32(%rbx),%rbx
adcxq %rbp,%r14
adoxq %r15,%r10
mulxq 0(%rcx),%rax,%r15
adcxq %rax,%r10
adoxq %r15,%r11
mulxq 8(%rcx),%rax,%r15
adcxq %rax,%r11
adoxq %r15,%r12
mulxq 16(%rcx),%rax,%r15
movq %r10,-40(%rbx)
adcxq %rax,%r12
adoxq %r15,%r13
mulxq 24(%rcx),%rax,%r15
movq %r9,%rdx
movq %r11,-32(%rbx)
movq %r12,-24(%rbx)
adcxq %rax,%r13
adoxq %rbp,%r15
leaq 32(%rcx),%rcx
movq %r13,-16(%rbx)
decq %rdi
jnz L$mulx4x_inner
movq 0(%rsp),%rax
movq 8(%rsp),%rdi
adcq %rbp,%r15
subq 0(%rbx),%rbp
adcq %r15,%r14
sbbq %r15,%r15
movq %r14,-8(%rbx)
cmpq 16(%rsp),%rdi
jne L$mulx4x_outer
leaq 64(%rsp),%rbx
subq %rax,%rcx
negq %r15
movq %rax,%rdx
shrq $3+2,%rax
movq 32(%rsp),%rdi
jmp L$mulx4x_sub
.p2align 5
L$mulx4x_sub:
movq 0(%rbx),%r11
movq 8(%rbx),%r12
movq 16(%rbx),%r13
movq 24(%rbx),%r14
leaq 32(%rbx),%rbx
sbbq 0(%rcx),%r11
sbbq 8(%rcx),%r12
sbbq 16(%rcx),%r13
sbbq 24(%rcx),%r14
leaq 32(%rcx),%rcx
movq %r11,0(%rdi)
movq %r12,8(%rdi)
movq %r13,16(%rdi)
movq %r14,24(%rdi)
leaq 32(%rdi),%rdi
decq %rax
jnz L$mulx4x_sub
sbbq $0,%r15
leaq 64(%rsp),%rbx
subq %rdx,%rdi
.byte 102,73,15,110,207
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp L$mulx4x_cond_copy
.p2align 5
L$mulx4x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
subq $32,%rdx
jnz L$mulx4x_cond_copy
movq %rdx,(%rbx)
movq $1,%rax
movq -48(%rsi),%r15
movq -40(%rsi),%r14
movq -32(%rsi),%r13
movq -24(%rsi),%r12
movq -16(%rsi),%rbp
movq -8(%rsi),%rbx
leaq (%rsi),%rsp
L$mulx4x_epilogue:
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 4
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha256-armv8-win64.S | 34,005 bytes
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are an
//       indication of some compiler "pathology"; most notably, code
//       generated with -mgeneral-regs-only is significantly faster,
//       and the remaining gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
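// A minimal C sketch of one SHA-256 round, matching the per-round comments
// below (reference only; functions as in FIPS 180-4). Note the scalar code
// computes Maj(a,b,c) via the identity b ^ ((a^b) & (b^c)), which is why
// the "(b^c)&=(a^b)" and "Maj(a,b,c)" comments appear split across rounds:
//
//   #define ROR32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))
//   uint32_t S1 = ROR32(e,6) ^ ROR32(e,11) ^ ROR32(e,25);   // Sigma1(e)
//   uint32_t ch = (e & f) ^ (~e & g);                       // Ch(e,f,g)
//   uint32_t t1 = h + S1 + ch + K[i] + X[i];
//   uint32_t S0 = ROR32(a,2) ^ ROR32(a,13) ^ ROR32(a,22);   // Sigma0(a)
//   uint32_t mj = (a & b) ^ (a & c) ^ (b & c);              // Maj(a,b,c)
//   uint32_t t2 = S0 + mj;
//   h = g; g = f; f = e; e = d + t1;
//   d = c; c = b; b = a; a = t1 + t2;
//
// The Loop_16_xx schedule expansion likewise follows the standard
//   X[i] += sigma0(X[i+1]) + sigma1(X[i+14]) + X[i+9]   (indices mod 16)
// with sigma0(x) = ROR32(x,7)^ROR32(x,18)^(x>>3) and
//      sigma1(x) = ROR32(x,17)^ROR32(x,19)^(x>>10).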
.globl sha256_block_data_order_nohw
.def sha256_block_data_order_nohw
.type 32
.endef
.align 6
sha256_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,LK256
add x30,x30,:lo12:LK256
stp x0,x2,[x29,#96]
Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section .rodata
.align 6
LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 // terminator
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl sha256_block_data_order_hw
.def sha256_block_data_order_hw
.type 32
.endef
.align 6
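// The .long constants inside this function are hand-assembled Armv8
// SHA-256 crypto-extension instructions (sha256h, sha256h2, sha256su0,
// sha256su1), emitted as raw words so the file builds with assemblers
// that lack these mnemonics; the intended instruction appears in each
// trailing comment.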
sha256_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,LK256
add x3,x3,:lo12:LK256
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aes-gcm-avx2-x86_64-elf.S | 25,191 bytes
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.section .rodata
.align 16
.Lbswap_mask:
.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
.Lgfpoly:
.quad 1, 0xc200000000000000
.Lgfpoly_and_internal_carrybit:
.quad 1, 0xc200000000000001
.align 32
.Lctr_pattern:
.quad 0, 0
.quad 1, 0
.Linc_2blocks:
.quad 2, 0
.quad 2, 0
.text
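// Reference note: H and all data blocks are byte-swapped into a
// bit-reflected representation, so GHASH multiplication in GF(2^128)
// modulo x^128 + x^7 + x^2 + x + 1 becomes a carry-less multiply
// (vpclmulqdq) followed by a Montgomery-style reduction against the
// .Lgfpoly constant above. gcm_init_vpclmulqdq_avx2 precomputes powers
// of the hash key so the bulk loops can fold several blocks per
// reduction.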
.globl gcm_init_vpclmulqdq_avx2
.hidden gcm_init_vpclmulqdq_avx2
.type gcm_init_vpclmulqdq_avx2,@function
.align 32
gcm_init_vpclmulqdq_avx2:
.cfi_startproc
_CET_ENDBR
vpshufd $0x4e,(%rsi),%xmm3
vpshufd $0xd3,%xmm3,%xmm0
vpsrad $31,%xmm0,%xmm0
vpaddq %xmm3,%xmm3,%xmm3
vpand .Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
vpxor %xmm0,%xmm3,%xmm3
vbroadcasti128 .Lgfpoly(%rip),%ymm6
vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1
vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2
vpshufd $0x4e,%xmm0,%xmm0
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm5,%xmm5
vpxor %xmm0,%xmm5,%xmm5
vinserti128 $1,%xmm3,%ymm5,%ymm3
vinserti128 $1,%xmm5,%ymm5,%ymm5
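// The .byte sequences below are VEX-encoded 256-bit vpclmulqdq (and,
// later, vaesenc/vaesenclast) instructions emitted as raw bytes so the
// file still assembles with toolchains that predate the VPCLMULQDQ and
// VAES mnemonics.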
.byte 0xc4,0xe3,0x65,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x65,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x65,0x44,0xe5,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
vmovdqu %ymm3,96(%rdi)
vmovdqu %ymm4,64(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128+32(%rdi)
.byte 0xc4,0xe3,0x5d,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x5d,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x5d,0x44,0xdd,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm3,%ymm3
vpxor %ymm0,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x65,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x65,0x44,0xe5,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,0(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128(%rdi)
vzeroupper
ret
.cfi_endproc
.size gcm_init_vpclmulqdq_avx2, . - gcm_init_vpclmulqdq_avx2
.globl gcm_ghash_vpclmulqdq_avx2_1
.hidden gcm_ghash_vpclmulqdq_avx2_1
.type gcm_ghash_vpclmulqdq_avx2_1,@function
.align 32
gcm_ghash_vpclmulqdq_avx2_1:
.cfi_startproc
_CET_ENDBR
vmovdqu .Lbswap_mask(%rip),%xmm6
vmovdqu .Lgfpoly(%rip),%xmm7
vmovdqu (%rdi),%xmm5
vpshufb %xmm6,%xmm5,%xmm5
.Lghash_lastblock:
vmovdqu (%rdx),%xmm0
vpshufb %xmm6,%xmm0,%xmm0
vpxor %xmm0,%xmm5,%xmm5
vmovdqu 128-16(%rsi),%xmm0
vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm2,%xmm2
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
vpshufd $0x4e,%xmm2,%xmm2
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm1,%xmm5,%xmm5
.Lghash_done:
vpshufb %xmm6,%xmm5,%xmm5
vmovdqu %xmm5,(%rdi)
vzeroupper
ret
.cfi_endproc
.size gcm_ghash_vpclmulqdq_avx2_1, . - gcm_ghash_vpclmulqdq_avx2_1
.globl aes_gcm_enc_update_vaes_avx2
.hidden aes_gcm_enc_update_vaes_avx2
.type aes_gcm_enc_update_vaes_avx2,@function
.align 32
aes_gcm_enc_update_vaes_avx2:
.cfi_startproc
_CET_ENDBR
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
movq 16(%rsp),%r12
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
.hidden BORINGSSL_function_hit
movb $1,BORINGSSL_function_hit+8(%rip)
#endif
vbroadcasti128 .Lbswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
cmpq $127,%rdx
jbe .Lcrypt_loop_4x_done__func1
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
vmovdqu .Linc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
leaq 16(%rcx),%rax
.Lvaesenc_loop_first_4_vecs__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
addq $16,%rax
cmpq %rax,%r11
jne .Lvaesenc_loop_first_4_vecs__func1
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
addq $-128,%rdx
cmpq $127,%rdx
jbe .Lghash_last_ciphertext_4x__func1
.align 16
.Lcrypt_loop_4x__func1:
vmovdqu .Linc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
cmpl $24,%r10d
jl .Laes128__func1
je .Laes192__func1
vbroadcasti128 -208(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -192(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.Laes192__func1:
vbroadcasti128 -176(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -160(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.Laes128__func1:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vbroadcasti128 -144(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -128(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 .Lgfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
addq $-128,%rdx
cmpq $127,%rdx
ja .Lcrypt_loop_4x__func1
.Lghash_last_ciphertext_4x__func1:
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 .Lgfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi
.Lcrypt_loop_4x_done__func1:
testq %rdx,%rdx
jz .Ldone__func1
leaq 128(%r9),%r8
subq %rdx,%r8
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb .Llessthan64bytes__func1
vpshufb %ymm0,%ymm11,%ymm12
vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
.Lvaesenc_loop_tail_1__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne .Lvaesenc_loop_tail_1__func1
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %ymm0,%ymm13,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
.byte 0xc4,0xe3,0x1d,0x44,0xea,0x00
.byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x11
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz .Lreduce__func1
vpxor %xmm1,%xmm1,%xmm1
.Llessthan64bytes__func1:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
.Lvaesenc_loop_tail_2__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne .Lvaesenc_loop_tail_2__func1
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
cmpq $32,%rdx
jb .Lxor_one_block__func1
je .Lxor_two_blocks__func1
.Lxor_three_blocks__func1:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %xmm0,%xmm13,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp .Lghash_mul_one_vec_unreduced__func1
.Lxor_two_blocks__func1:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp .Lghash_mul_one_vec_unreduced__func1
.Lxor_one_block__func1:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
.Lghash_mul_one_vec_unreduced__func1:
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11
vpxor %ymm4,%ymm7,%ymm7
.Lreduce__func1:
vbroadcasti128 .Lgfpoly(%rip),%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
.byte 0xc4,0xe3,0x6d,0x44,0xde,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
.Ldone__func1:
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
ret
.cfi_endproc
.size aes_gcm_enc_update_vaes_avx2, . - aes_gcm_enc_update_vaes_avx2
.globl aes_gcm_dec_update_vaes_avx2
.hidden aes_gcm_dec_update_vaes_avx2
.type aes_gcm_dec_update_vaes_avx2,@function
.align 32
aes_gcm_dec_update_vaes_avx2:
.cfi_startproc
_CET_ENDBR
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
movq 16(%rsp),%r12
vbroadcasti128 .Lbswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
cmpq $127,%rdx
jbe .Lcrypt_loop_4x_done__func2
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
.align 16
.Lcrypt_loop_4x__func2:
vmovdqu .Linc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
cmpl $24,%r10d
jl .Laes128__func2
je .Laes192__func2
vbroadcasti128 -208(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -192(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.Laes192__func2:
vbroadcasti128 -176(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -160(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.Laes128__func2:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
vmovdqu 0(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vbroadcasti128 -144(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -128(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 32(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 64(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 .Lgfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
subq $-128,%rsi
addq $-128,%rdx
cmpq $127,%rdx
ja .Lcrypt_loop_4x__func2
.Lcrypt_loop_4x_done__func2:
testq %rdx,%rdx
jz .Ldone__func2
leaq 128(%r9),%r8
subq %rdx,%r8
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb .Llessthan64bytes__func2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
.Lvaesenc_loop_tail_1__func2:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne .Lvaesenc_loop_tail_1__func2
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %ymm0,%ymm3,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
.byte 0xc4,0xe3,0x1d,0x44,0xea,0x00
.byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x11
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz .Lreduce__func2
vpxor %xmm1,%xmm1,%xmm1
.Llessthan64bytes__func2:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
.Lvaesenc_loop_tail_2__func2:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne .Lvaesenc_loop_tail_2__func2
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
cmpq $32,%rdx
jb .Lxor_one_block__func2
je .Lxor_two_blocks__func2
.Lxor_three_blocks__func2:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %xmm0,%xmm3,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp .Lghash_mul_one_vec_unreduced__func2
.Lxor_two_blocks__func2:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp .Lghash_mul_one_vec_unreduced__func2
.Lxor_one_block__func2:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm2,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
.Lghash_mul_one_vec_unreduced__func2:
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11
vpxor %ymm4,%ymm7,%ymm7
.Lreduce__func2:
vbroadcasti128 .Lgfpoly(%rip),%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
.byte 0xc4,0xe3,0x6d,0x44,0xde,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
.Ldone__func2:
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
ret
.cfi_endproc
.size aes_gcm_dec_update_vaes_avx2, . - aes_gcm_dec_update_vaes_avx2
#endif
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/p256-armv8-asm-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.section .rodata
.align 5
Lpoly:
.quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
LRR: // 2^512 mod P, precomputed for the NIST P-256 prime
.quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
Lone_mont:
.quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
Lone:
.quad 1,0,0,0
Lord:
.quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
LordK:
.quad 0xccd1c8aaee00bc4f
.byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
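// For reference, the constants above as Python integers (an illustrative
// sketch, not generator output; quads are stored least-significant first):
//
//   P  = 2**256 - 2**224 + 2**192 + 2**96 - 1    # Lpoly: the NIST P-256 prime
//   RR = 2**512 % P                              # LRR: maps a to a*2^256 mod P via mul_mont
//   N  = 0xffffffff00000000_ffffffffffffffff_bce6faada7179e84_f3b9cac2fc632551  # Lord
//   assert 2**256 % P == 0x00000000fffffffe_ffffffffffffffff_ffffffff00000000_0000000000000001  # Lone_mont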
.align 2
.text
// void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
// const BN_ULONG x2[4]);
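// As an illustrative Python sketch (names are for exposition only), the
// routine computes the Montgomery product of operands already in
// Montgomery form:
//
//   P = 2**256 - 2**224 + 2**192 + 2**96 - 1
//   def mul_mont(a, b):
//       return a * b * pow(2**256, -1, P) % P   # a*b*R^-1 mod P, R = 2^256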
.globl ecp_nistz256_mul_mont
.def ecp_nistz256_mul_mont
.type 32
.endef
.align 4
ecp_nistz256_mul_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_mul_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_sqr_mont
.def ecp_nistz256_sqr_mont
.type 32
.endef
.align 4
ecp_nistz256_sqr_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sqr_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_neg
.def ecp_nistz256_neg
.type 32
.endef
.align 4
ecp_nistz256_neg:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x2,x1
mov x14,xzr // a = 0
mov x15,xzr
mov x16,xzr
mov x17,xzr
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sub_from
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// Note that __ecp_nistz256_mul_mont expects the a[0-3] input pre-loaded
// into x4-x7, and b[0] into x3.
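//
// The "*0xffff0001" steps below exploit the shape of P: P mod 2^64 is
// 2^64 - 1, so adding acc[0]*P cancels the low limb exactly, and acc[0]*P
// itself reduces to shifts and subtractions. One reduction round, as an
// illustrative Python sketch:
//
//   P = 2**256 - 2**224 + 2**192 + 2**96 - 1
//   MASK = 2**64 - 1
//   def reduce_round(acc):
//       a0 = acc & MASK
//       t = acc + a0 * P           # low 64 bits of t are now zero
//       assert t & MASK == 0
//       return t >> 64             # "omit acc[0]"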
.def __ecp_nistz256_mul_mont
.type 32
.endef
.align 4
__ecp_nistz256_mul_mont:
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x11,x7,x3
ldr x3,[x2,#8] // b[1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adc x19,xzr,x11
mov x20,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(1+1)] // b[1+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(2+1)] // b[2+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
// last reduction
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adcs x17,x19,x11
adc x19,x20,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// Note that __ecp_nistz256_sqr_mont expects the a[0-3] input pre-loaded
// into x4-x7.
.def __ecp_nistz256_sqr_mont
.type 32
.endef
.align 4
__ecp_nistz256_sqr_mont:
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where each Ax is a 64-bit accumulator limb
// holding that column of the 512-bit result.
//
// The "can't overflow" notes below mark carries into the high part of a
// multiplication result, which cannot overflow because the high half of
// a 64x64-bit product can never be all ones.
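//
// The "acc[1-6]*=2" doubling below implements the identity
// a^2 = sum(a_i^2 * 2^(128*i)) + 2 * sum_{i<j}(a_i * a_j * 2^(64*(i+j))),
// which the following illustrative Python snippet checks:
//
//   import random
//   a = [random.getrandbits(64) for _ in range(4)]
//   n = sum(x << (64*i) for i, x in enumerate(a))
//   sq = sum(a[i]*a[i] << (128*i) for i in range(4))
//   cr = sum(a[i]*a[j] << (64*(i+j)) for i in range(4) for j in range(i+1, 4))
//   assert n*n == sq + 2*cr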
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x2,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
lsl x8,x14,#32
adcs x1,x1,x11
lsr x9,x14,#32
adc x2,x2,x7
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adc x17,x11,xzr // can't overflow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x2
adc x19,xzr,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// Note that __ecp_nistz256_add_to expects both input vectors pre-loaded,
// the accumulator in x14-x17 and the addend in x8-x11. This is done
// because it is used in multiple contexts, e.g. in multiplication by 2
// and 3.
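//
// Functionally (an illustrative Python sketch; the assembly uses csel
// instead of a branch to stay constant time):
//
//   P = 2**256 - 2**224 + 2**192 + 2**96 - 1
//   def add_mod_p(a, b):              # a, b < P
//       r = a + b
//       return r - P if r >= P else r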
.def __ecp_nistz256_add_to
.type 32
.endef
.align 4
__ecp_nistz256_add_to:
adds x14,x14,x8 // ret = a+b
adcs x15,x15,x9
adcs x16,x16,x10
adcs x17,x17,x11
adc x1,xzr,xzr // zap x1
adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x1,xzr // did subtraction borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.def __ecp_nistz256_sub_from
.type 32
.endef
.align 4
__ecp_nistz256_sub_from:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x14,x8 // ret = a-b
sbcs x15,x15,x9
sbcs x16,x16,x10
sbcs x17,x17,x11
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
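// __ecp_nistz256_sub_from above and __ecp_nistz256_sub_morf below (the
// same operation with the operand order reversed) add the modulus back
// when the raw subtraction borrows. Illustrative Python sketch:
//
//   P = 2**256 - 2**224 + 2**192 + 2**96 - 1
//   def sub_mod_p(a, b):              # a, b < P
//       r = a - b
//       return r + P if r < 0 else r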
.def __ecp_nistz256_sub_morf
.type 32
.endef
.align 4
__ecp_nistz256_sub_morf:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x8,x14 // ret = b-a
sbcs x15,x9,x15
sbcs x16,x10,x16
sbcs x17,x11,x17
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.def __ecp_nistz256_div_by_2
.type 32
.endef
.align 4
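// Halving mod P: since P is odd, exactly one of a and a+P is even, and
// that one can simply be shifted right. Illustrative Python sketch:
//
//   P = 2**256 - 2**224 + 2**192 + 2**96 - 1
//   def div_by_2_mod_p(a):            # a < P
//       t = a if a % 2 == 0 else a + P
//       return t >> 1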
__ecp_nistz256_div_by_2:
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adcs x11,x17,x13
adc x1,xzr,xzr // zap x1
tst x14,#1 // is a even?
csel x14,x14,x8,eq // ret = even ? a : a+modulus
csel x15,x15,x9,eq
csel x16,x16,x10,eq
csel x17,x17,x11,eq
csel x1,xzr,x1,eq
lsr x14,x14,#1 // ret >>= 1
orr x14,x14,x15,lsl#63
lsr x15,x15,#1
orr x15,x15,x16,lsl#63
lsr x16,x16,#1
orr x16,x16,x17,lsl#63
lsr x17,x17,#1
stp x14,x15,[x0]
orr x17,x17,x1,lsl#63
stp x16,x17,[x0,#16]
ret
.globl ecp_nistz256_point_double
.def ecp_nistz256_point_double
.type 32
.endef
.align 5
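// The call sequence below evaluates the usual Jacobian doubling formulas,
// using a = -3 (which holds for P-256) to fold the curve coefficient into
// M = 3(X - Z^2)(X + Z^2). Illustrative Python sketch, all arithmetic mod P:
//
//   P = 2**256 - 2**224 + 2**192 + 2**96 - 1
//   def point_double(X, Y, Z):
//       S  = 4 * X * Y * Y % P
//       M  = 3 * (X * X - pow(Z, 4, P)) % P
//       X3 = (M * M - 2 * S) % P
//       Y3 = (M * (S - X3) - 8 * pow(Y, 4, P)) % P
//       Z3 = 2 * Y * Z % P
//       return X3, Y3, Z3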
ecp_nistz256_point_double:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
sub sp,sp,#32*4
Ldouble_shortcut:
ldp x14,x15,[x1,#32]
mov x21,x0
ldp x16,x17,[x1,#48]
mov x22,x1
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
mov x8,x14
ldr x13,[x13,#24]
mov x9,x15
ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[x22,#64+16]
add x0,sp,#0
bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y);
add x0,sp,#64
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
ldp x8,x9,[x22]
ldp x10,x11,[x22,#16]
mov x4,x14 // put Zsqr aside for p256_sub
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x);
add x2,x22,#0
mov x14,x4 // restore Zsqr
mov x15,x5
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x16,x6
mov x17,x7
ldp x6,x7,[sp,#0+16]
add x0,sp,#64
bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
add x0,sp,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
ldr x3,[x22,#32]
ldp x4,x5,[x22,#64]
ldp x6,x7,[x22,#64+16]
add x2,x22,#32
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#0+16]
add x0,x21,#64
bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0);
add x0,sp,#96
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
ldr x3,[sp,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x0,x21,#32
bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
add x2,sp,#64
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
mov x8,x14 // duplicate M
mov x9,x15
mov x10,x16
mov x11,x17
mov x4,x14 // put M aside
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to
mov x8,x4 // restore M
mov x9,x5
ldr x3,[x22] // forward load for p256_mul_mont
mov x10,x6
ldp x4,x5,[sp,#0]
mov x11,x7
ldp x6,x7,[sp,#0+16]
bl __ecp_nistz256_add_to // p256_mul_by_3(M, M);
add x2,x22,#0
add x0,sp,#0
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#32+16]
add x0,sp,#96
bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S);
add x0,x21,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
add x2,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
add x2,sp,#0
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
ldr x3,[sp,#32]
mov x4,x14 // copy S
mov x5,x15
mov x6,x16
mov x7,x17
add x2,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
add x2,x21,#32
add x0,x21,#32
bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl ecp_nistz256_point_add
.def ecp_nistz256_point_add
.type 32
.endef
.align 5
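// The call sequence below evaluates the usual Jacobian addition formulas;
// the infinity and equal-point cases are handled separately via the masks
// and the Ladd_double branch. Illustrative Python sketch for the generic
// case (distinct, non-infinity inputs), all arithmetic mod P:
//
//   P = 2**256 - 2**224 + 2**192 + 2**96 - 1
//   def point_add(X1, Y1, Z1, X2, Y2, Z2):
//       U1, U2 = X1 * pow(Z2, 2, P) % P, X2 * pow(Z1, 2, P) % P
//       S1, S2 = Y1 * pow(Z2, 3, P) % P, Y2 * pow(Z1, 3, P) % P
//       H, R = (U2 - U1) % P, (S2 - S1) % P
//       X3 = (R * R - H**3 - 2 * U1 * H * H) % P
//       Y3 = (R * (U1 * H * H - X3) - S1 * H**3) % P
//       Z3 = H * Z1 * Z2 % P
//       return X3, Y3, Z3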
ecp_nistz256_point_add:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#32*12
ldp x4,x5,[x2,#64] // in2_z
ldp x6,x7,[x2,#64+16]
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
orr x8,x4,x5
orr x10,x6,x7
orr x25,x8,x10
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
ldp x4,x5,[x22,#64] // in1_z
ldp x6,x7,[x22,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x2,x23,#64
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x22,#64
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#32]
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x2,x22,#32
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#352]
ldp x6,x7,[sp,#352+16]
add x2,x23,#32
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,sp,#320
ldr x3,[sp,#192] // forward load for p256_mul_mont
ldp x4,x5,[x22]
ldp x6,x7,[x22,#16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x26,x14,x16 // ~is_equal(S1,S2)
add x2,sp,#192
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
ldr x3,[sp,#128]
ldp x4,x5,[x23]
ldp x6,x7,[x23,#16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
add x2,sp,#256
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x14,x14,x16 // ~is_equal(U1,U2)
mvn x27,x24 // -1/0 -> 0/-1
mvn x28,x25 // -1/0 -> 0/-1
orr x14,x14,x27
orr x14,x14,x28
orr x14,x14,x26
cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))
Ladd_double:
mov x1,x22
mov x0,x21
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
 add sp,sp,#256 // #256 = 32*(12-4): the difference between the two stack frames
b Ldouble_shortcut
.align 4
Ladd_proceed:
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#64]
ldp x6,x7,[sp,#64+16]
add x2,x23,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
ldr x3,[sp,#96]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,sp,#96
add x0,sp,#224
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[sp,#128]
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#128
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#192
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#224
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#288
ldr x3,[sp,#224] // forward load for p256_mul_mont
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,sp,#224
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#160
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#352
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
Ladd_done:
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl ecp_nistz256_point_add_affine
.def ecp_nistz256_point_add_affine
.type 32
.endef
.align 5
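// Mixed addition: the second point is affine (implicitly Z2 = 1), so the
// Z2sqr/S1 work from ecp_nistz256_point_add drops out; otherwise the same
// Jacobian addition formulas apply (see the sketch before
// ecp_nistz256_point_add).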
ecp_nistz256_point_add_affine:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
sub sp,sp,#32*10
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
ldp x4,x5,[x1,#64] // in1_z
ldp x6,x7,[x1,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
ldp x14,x15,[x2] // in2_x
ldp x16,x17,[x2,#16]
ldp x8,x9,[x2,#32] // in2_y
ldp x10,x11,[x2,#48]
orr x14,x14,x15
orr x16,x16,x17
orr x8,x8,x9
orr x10,x10,x11
orr x14,x14,x16
orr x8,x8,x10
orr x25,x14,x8
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
mov x4,x14
mov x5,x15
mov x6,x16
mov x7,x17
ldr x3,[x23]
add x2,x23,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
add x2,x22,#0
ldr x3,[x22,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
add x2,x22,#64
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#160]
ldp x6,x7,[sp,#160+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x23,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,x22,#32
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#192
bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
add x0,sp,#224
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x0,sp,#288
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,sp,#160
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[x22]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,x22,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#224
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#288
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#256
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#96
ldr x3,[x22,#32] // forward load for p256_mul_mont
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,x22,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
ldr x3,[sp,#192]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#192
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#128
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
adrp x23,Lone_mont-64
add x23,x23,:lo12:Lone_mont-64
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x29,x30,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
// uint64_t b[4]);
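// Same Montgomery pattern as __ecp_nistz256_mul_mont, but modulo the group
// order N (Lord); LordK holds the per-limb constant -N^-1 mod 2^64 used to
// zero the low limb each round. Illustrative Python sketch:
//
//   N = 0xffffffff00000000_ffffffffffffffff_bce6faada7179e84_f3b9cac2fc632551
//   assert (N * 0xccd1c8aaee00bc4f + 1) % 2**64 == 0    # LordK
//   def ord_mul_mont(a, b):
//       return a * b * pow(2**256, -1, N) % N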
.globl ecp_nistz256_ord_mul_mont
.def ecp_nistz256_ord_mul_mont
.type 32
.endef
.align 4
ecp_nistz256_ord_mul_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord
add x23,x23,:lo12:Lord
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x19,x7,x3
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts of multiplication
adcs x16,x16,x9
adcs x17,x17,x10
adc x19,x19,xzr
mov x20,xzr
ldr x3,[x2,#8*1] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*2] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*3] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
lsl x8,x24,#32 // last reduction
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
// uint64_t rep);
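// rep consecutive Montgomery squarings modulo the group order.
// Illustrative Python sketch:
//
//   N = 0xffffffff00000000_ffffffffffffffff_bce6faada7179e84_f3b9cac2fc632551
//   def ord_sqr_mont(a, rep):
//       r_inv = pow(2**256, -1, N)
//       for _ in range(rep):
//           a = a * a * r_inv % N
//       return a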
.globl ecp_nistz256_ord_sqr_mont
.def ecp_nistz256_ord_sqr_mont
.type 32
.endef
.align 4
ecp_nistz256_ord_sqr_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord
add x23,x23,:lo12:Lord
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
b Loop_ord_sqr
.align 4
Loop_ord_sqr:
sub x2,x2,#1
////////////////////////////////////////////////////////////////
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
// |A7|A6|A5|A4|A3|A2|A1|A0|, where each Ax is a 64-bit accumulator limb
// holding that column of the 512-bit result.
//
// The "can't overflow" notes below mark carries into the high part of a
// multiplication result, which cannot overflow because the high half of
// a 64x64-bit product can never be all ones.
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x3,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
mul x24,x14,x23
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
adcs x1,x1,x11
adc x3,x3,x7
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
mul x24,x14,x23
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x3
adc x19,xzr,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x5,x15,x9,lo
csel x6,x16,x10,lo
csel x7,x17,x11,lo
cbnz x2,Loop_ord_sqr
stp x4,x5,[x0]
stp x6,x7,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index);
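// Constant-time lookup: every table entry is read, and a mask keeps only
// the entry whose 1-based position equals index (index 0 matches nothing,
// leaving the all-zero value). Illustrative Python sketch:
//
//   def select_w5(table, index):          # table: 16 entries of 6 words each
//       val = [0] * 6
//       for i, entry in enumerate(table, start=1):
//           mask = -1 if i == index else 0        # csetm: all-ones or zero
//           val = [v ^ ((v ^ e) & mask) for v, e in zip(val, entry)]
//       return val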
.globl ecp_nistz256_select_w5
.def ecp_nistz256_select_w5
.type 32
.endef
.align 4
ecp_nistz256_select_w5:
AARCH64_VALID_CALL_TARGET
// x10 := x0
// w9 := 0; loop counter and incremented internal index
mov x10, x0
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
movi v20.16b, #0
movi v21.16b, #0
Lselect_w5_loop:
// Loop 16 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// continue loading ...
ld1 {v26.2d, v27.2d}, [x1],#32
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v21] := (Mask == all 1s)? [v22-v27] : [v16-v21]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
bit v20.16b, v26.16b, v3.16b
bit v21.16b, v27.16b, v3.16b
// Loop back while bit #4 is still 0 (i.e. idx_ctr < 16)
tbz w9, #4, Lselect_w5_loop
// Write [v16-v21] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64
st1 {v20.2d, v21.2d}, [x10]
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index);
.globl ecp_nistz256_select_w7
.def ecp_nistz256_select_w7
.type 32
.endef
.align 4
ecp_nistz256_select_w7:
AARCH64_VALID_CALL_TARGET
// w9 := 0; loop counter and incremented internal index
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
Lselect_w7_loop:
// Loop 64 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
// Loop back while bit #6 is still 0 (i.e. idx_ctr < 64)
tbz w9, #6, Lselect_w7_loop
// Write [v16-v19] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0]
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,@function
.align 16
gcm_init_clmul:
.cfi_startproc
_CET_ENDBR
.L_init_clmul:
movdqu (%rsi),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand .L0x1c2_polynomial(%rip),%xmm5
pxor %xmm5,%xmm2
pshufd $78,%xmm2,%xmm6
movdqa %xmm2,%xmm0
pxor %xmm2,%xmm6
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,0(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%rdi)
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm5
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm5,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm5,%xmm3
movdqu %xmm5,48(%rdi)
pxor %xmm0,%xmm4
movdqu %xmm0,64(%rdi)
.byte 102,15,58,15,227,8
movdqu %xmm4,80(%rdi)
ret
.cfi_endproc
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_ghash_clmul
.hidden gcm_ghash_clmul
.type gcm_ghash_clmul,@function
.align 32
gcm_ghash_clmul:
.cfi_startproc
_CET_ENDBR
.L_ghash_clmul:
movdqa .Lbswap_mask(%rip),%xmm10
movdqu (%rdi),%xmm0
movdqu (%rsi),%xmm2
movdqu 32(%rsi),%xmm7
.byte 102,65,15,56,0,194
subq $0x10,%rcx
jz .Lodd_tail
movdqu 16(%rsi),%xmm6
cmpq $0x30,%rcx
jb .Lskip4x
subq $0x30,%rcx
movq $0xA040608020C0E000,%rax
movdqu 48(%rsi),%xmm14
movdqu 64(%rsi),%xmm15
movdqu 48(%rdx),%xmm3
movdqu 32(%rdx),%xmm11
.byte 102,65,15,56,0,218
.byte 102,69,15,56,0,218
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm11,%xmm12
.byte 102,68,15,58,68,222,0
.byte 102,68,15,58,68,238,17
.byte 102,68,15,58,68,231,16
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
xorps %xmm12,%xmm4
movdqu 16(%rdx),%xmm11
movdqu 0(%rdx),%xmm8
.byte 102,69,15,56,0,218
.byte 102,69,15,56,0,194
movdqa %xmm11,%xmm13
pshufd $78,%xmm11,%xmm12
pxor %xmm8,%xmm0
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,69,15,58,68,238,17
.byte 102,68,15,58,68,231,0
xorps %xmm11,%xmm3
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jc .Ltail4x
jmp .Lmod4_loop
.align 32
.Lmod4_loop:
.byte 102,65,15,58,68,199,0
xorps %xmm12,%xmm4
movdqu 48(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,65,15,58,68,207,17
xorps %xmm3,%xmm0
movdqu 32(%rdx),%xmm3
movdqa %xmm11,%xmm13
.byte 102,68,15,58,68,199,16
pshufd $78,%xmm11,%xmm12
xorps %xmm5,%xmm1
pxor %xmm11,%xmm12
.byte 102,65,15,56,0,218
movups 32(%rsi),%xmm7
xorps %xmm4,%xmm8
.byte 102,68,15,58,68,218,0
pshufd $78,%xmm3,%xmm4
pxor %xmm0,%xmm8
movdqa %xmm3,%xmm5
pxor %xmm1,%xmm8
pxor %xmm3,%xmm4
movdqa %xmm8,%xmm9
.byte 102,68,15,58,68,234,17
pslldq $8,%xmm8
psrldq $8,%xmm9
pxor %xmm8,%xmm0
movdqa .L7_mask(%rip),%xmm8
pxor %xmm9,%xmm1
.byte 102,76,15,110,200
pand %xmm0,%xmm8
.byte 102,69,15,56,0,200
pxor %xmm0,%xmm9
.byte 102,68,15,58,68,231,0
psllq $57,%xmm9
movdqa %xmm9,%xmm8
pslldq $8,%xmm9
.byte 102,15,58,68,222,0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
movdqu 0(%rdx),%xmm8
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,238,17
xorps %xmm11,%xmm3
movdqu 16(%rdx),%xmm11
.byte 102,69,15,56,0,218
.byte 102,15,58,68,231,16
xorps %xmm13,%xmm5
movups 80(%rsi),%xmm7
.byte 102,69,15,56,0,194
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
movdqa %xmm11,%xmm13
pxor %xmm12,%xmm4
pshufd $78,%xmm11,%xmm12
pxor %xmm9,%xmm0
pxor %xmm8,%xmm1
pxor %xmm11,%xmm12
.byte 102,69,15,58,68,222,0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
movdqa %xmm0,%xmm1
.byte 102,69,15,58,68,238,17
xorps %xmm11,%xmm3
pshufd $78,%xmm0,%xmm8
pxor %xmm0,%xmm8
.byte 102,68,15,58,68,231,0
xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx
subq $0x40,%rcx
jnc .Lmod4_loop
.Ltail4x:
.byte 102,65,15,58,68,199,0
.byte 102,65,15,58,68,207,17
.byte 102,68,15,58,68,199,16
xorps %xmm12,%xmm4
xorps %xmm3,%xmm0
xorps %xmm5,%xmm1
pxor %xmm0,%xmm1
pxor %xmm4,%xmm8
pxor %xmm1,%xmm8
pxor %xmm0,%xmm1
movdqa %xmm8,%xmm9
psrldq $8,%xmm8
pslldq $8,%xmm9
pxor %xmm8,%xmm1
pxor %xmm9,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
addq $0x40,%rcx
jz .Ldone
movdqu 32(%rsi),%xmm7
subq $0x10,%rcx
jz .Lodd_tail
.Lskip4x:
movdqu (%rdx),%xmm8
movdqu 16(%rdx),%xmm3
.byte 102,69,15,56,0,194
.byte 102,65,15,56,0,218
pxor %xmm8,%xmm0
movdqa %xmm3,%xmm5
pshufd $78,%xmm3,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,68,218,0
.byte 102,15,58,68,234,17
.byte 102,15,58,68,231,0
leaq 32(%rdx),%rdx
nop
subq $0x20,%rcx
jbe .Leven_tail
nop
jmp .Lmod_loop
.align 32
.Lmod_loop:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
movdqu (%rdx),%xmm9
pxor %xmm0,%xmm8
.byte 102,69,15,56,0,202
movdqu 16(%rdx),%xmm3
pxor %xmm1,%xmm8
pxor %xmm9,%xmm1
pxor %xmm8,%xmm4
.byte 102,65,15,56,0,218
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm3,%xmm5
movdqa %xmm0,%xmm9
movdqa %xmm0,%xmm8
psllq $5,%xmm0
pxor %xmm0,%xmm8
.byte 102,15,58,68,218,0
psllq $1,%xmm0
pxor %xmm8,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm8
pslldq $8,%xmm0
psrldq $8,%xmm8
pxor %xmm9,%xmm0
pshufd $78,%xmm5,%xmm4
pxor %xmm8,%xmm1
pxor %xmm5,%xmm4
movdqa %xmm0,%xmm9
psrlq $1,%xmm0
.byte 102,15,58,68,234,17
pxor %xmm9,%xmm1
pxor %xmm0,%xmm9
psrlq $5,%xmm0
pxor %xmm9,%xmm0
leaq 32(%rdx),%rdx
psrlq $1,%xmm0
.byte 102,15,58,68,231,0
pxor %xmm1,%xmm0
subq $0x20,%rcx
ja .Lmod_loop
.Leven_tail:
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm8
pshufd $78,%xmm0,%xmm4
pxor %xmm0,%xmm4
.byte 102,15,58,68,198,0
.byte 102,15,58,68,206,17
.byte 102,15,58,68,231,16
pxor %xmm3,%xmm0
pxor %xmm5,%xmm1
pxor %xmm0,%xmm8
pxor %xmm1,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm8
psrldq $8,%xmm8
pslldq $8,%xmm4
pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testq %rcx,%rcx
jnz .Ldone
.Lodd_tail:
movdqu (%rdx),%xmm8
.byte 102,69,15,56,0,194
pxor %xmm8,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,223,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.Ldone:
.byte 102,65,15,56,0,194
movdqu %xmm0,(%rdi)
ret
.cfi_endproc
.size gcm_ghash_clmul,.-gcm_ghash_clmul
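// gcm_init_avx: AVX flavour of the GHASH key setup.  It derives H from
// the raw hash key at (%rsi) and fills the table at (%rdi) with
// successive powers of H (plus their Karatsuba hi^lo halves) for the
// eight-block gcm_ghash_avx loop.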
.globl gcm_init_avx
.hidden gcm_init_avx
.type gcm_init_avx,@function
.align 32
gcm_init_avx:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqu (%rsi),%xmm2
vpshufd $78,%xmm2,%xmm2
vpshufd $255,%xmm2,%xmm4
vpsrlq $63,%xmm2,%xmm3
vpsllq $1,%xmm2,%xmm2
vpxor %xmm5,%xmm5,%xmm5
vpcmpgtd %xmm4,%xmm5,%xmm5
vpslldq $8,%xmm3,%xmm3
vpor %xmm3,%xmm2,%xmm2
vpand .L0x1c2_polynomial(%rip),%xmm5,%xmm5
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm2,%xmm2,%xmm6
vmovdqa %xmm2,%xmm0
vpxor %xmm2,%xmm6,%xmm6
movq $4,%r10
jmp .Linit_start_avx
.align 32
.Linit_loop_avx:
vpalignr $8,%xmm3,%xmm4,%xmm5
vmovdqu %xmm5,-16(%rdi)
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
.Linit_start_avx:
vmovdqa %xmm0,%xmm5
vpunpckhqdq %xmm0,%xmm0,%xmm3
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3
vpxor %xmm0,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm3,%xmm1,%xmm1
vpsllq $57,%xmm0,%xmm3
vpsllq $62,%xmm0,%xmm4
vpxor %xmm3,%xmm4,%xmm4
vpsllq $63,%xmm0,%xmm3
vpxor %xmm3,%xmm4,%xmm4
vpslldq $8,%xmm4,%xmm3
vpsrldq $8,%xmm4,%xmm4
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm4,%xmm1,%xmm1
vpsrlq $1,%xmm0,%xmm4
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $5,%xmm4,%xmm4
vpxor %xmm4,%xmm0,%xmm0
vpsrlq $1,%xmm0,%xmm0
vpxor %xmm1,%xmm0,%xmm0
vpshufd $78,%xmm5,%xmm3
vpshufd $78,%xmm0,%xmm4
vpxor %xmm5,%xmm3,%xmm3
vmovdqu %xmm5,0(%rdi)
vpxor %xmm0,%xmm4,%xmm4
vmovdqu %xmm0,16(%rdi)
leaq 48(%rdi),%rdi
subq $1,%r10
jnz .Linit_loop_avx
vpalignr $8,%xmm4,%xmm3,%xmm5
vmovdqu %xmm5,-16(%rdi)
vzeroupper
ret
.cfi_endproc
.size gcm_init_avx,.-gcm_init_avx
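// gcm_ghash_avx: AVX GHASH update with the same argument layout as
// gcm_ghash_clmul; it processes 128 bytes (eight blocks) per
// .Loop8x_avx iteration, with .Lshort_avx/.Ltail_avx consuming any
// remainder 16 bytes at a time.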
.globl gcm_ghash_avx
.hidden gcm_ghash_avx
.type gcm_ghash_avx,@function
.align 32
gcm_ghash_avx:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqu (%rdi),%xmm10
leaq .L0x1c2_polynomial(%rip),%r10
leaq 64(%rsi),%rsi
vmovdqu .Lbswap_mask(%rip),%xmm13
vpshufb %xmm13,%xmm10,%xmm10
cmpq $0x80,%rcx
jb .Lshort_avx
subq $0x80,%rcx
vmovdqu 112(%rdx),%xmm14
vmovdqu 0-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vmovdqu 32-64(%rsi),%xmm7
vpunpckhqdq %xmm14,%xmm14,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm14,%xmm9,%xmm9
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 80(%rdx),%xmm14
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 48-64(%rsi),%xmm6
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 64(%rdx),%xmm15
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 48(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 32(%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vmovdqu 16(%rdx),%xmm14
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm4,%xmm1,%xmm1
vpshufb %xmm13,%xmm14,%xmm14
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpxor %xmm5,%xmm2,%xmm2
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu (%rdx),%xmm15
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm1,%xmm4,%xmm4
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
leaq 128(%rdx),%rdx
cmpq $0x80,%rcx
jb .Ltail_avx
vpxor %xmm10,%xmm15,%xmm15
subq $0x80,%rcx
jmp .Loop8x_avx
.align 32
.Loop8x_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 112(%rdx),%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpxor %xmm15,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11
vmovdqu 0-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12
vmovdqu 32-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vmovdqu 96(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpxor %xmm3,%xmm10,%xmm10
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vxorps %xmm4,%xmm11,%xmm11
vmovdqu 16-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm5,%xmm12,%xmm12
vxorps %xmm15,%xmm8,%xmm8
vmovdqu 80(%rdx),%xmm14
vpxor %xmm10,%xmm12,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpxor %xmm11,%xmm12,%xmm12
vpslldq $8,%xmm12,%xmm9
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vpsrldq $8,%xmm12,%xmm12
vpxor %xmm9,%xmm10,%xmm10
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm14
vxorps %xmm12,%xmm11,%xmm11
vpxor %xmm1,%xmm4,%xmm4
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 80-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 64(%rdx),%xmm15
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vxorps %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vmovdqu 48(%rdx),%xmm14
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 96-64(%rsi),%xmm6
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 128-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 32(%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpxor %xmm3,%xmm0,%xmm0
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm5,%xmm2,%xmm2
vxorps %xmm12,%xmm10,%xmm10
vmovdqu 16(%rdx),%xmm14
vpalignr $8,%xmm10,%xmm10,%xmm12
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3
vpshufb %xmm13,%xmm14,%xmm14
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4
vmovdqu 144-64(%rsi),%xmm6
vpclmulqdq $0x10,(%r10),%xmm10,%xmm10
vxorps %xmm11,%xmm12,%xmm12
vpunpckhqdq %xmm14,%xmm14,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5
vmovdqu 176-64(%rsi),%xmm7
vpxor %xmm14,%xmm9,%xmm9
vpxor %xmm2,%xmm5,%xmm5
vmovdqu (%rdx),%xmm15
vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0
vpshufb %xmm13,%xmm15,%xmm15
vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1
vmovdqu 160-64(%rsi),%xmm6
vpxor %xmm12,%xmm15,%xmm15
vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2
vpxor %xmm10,%xmm15,%xmm15
leaq 128(%rdx),%rdx
subq $0x80,%rcx
jnc .Loop8x_avx
addq $0x80,%rcx
jmp .Ltail_no_xor_avx
.align 32
.Lshort_avx:
vmovdqu -16(%rdx,%rcx,1),%xmm14
leaq (%rdx,%rcx,1),%rdx
vmovdqu 0-64(%rsi),%xmm6
vmovdqu 32-64(%rsi),%xmm7
vpshufb %xmm13,%xmm14,%xmm15
vmovdqa %xmm0,%xmm3
vmovdqa %xmm1,%xmm4
vmovdqa %xmm2,%xmm5
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -32(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 16-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -48(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 48-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 80-64(%rsi),%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -64(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 64-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -80(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 96-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu 128-64(%rsi),%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -96(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 112-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vpsrldq $8,%xmm7,%xmm7
subq $0x10,%rcx
jz .Ltail_avx
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vmovdqu -112(%rdx),%xmm14
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vmovdqu 144-64(%rsi),%xmm6
vpshufb %xmm13,%xmm14,%xmm15
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovq 184-64(%rsi),%xmm7
subq $0x10,%rcx
jmp .Ltail_avx
.align 32
.Ltail_avx:
vpxor %xmm10,%xmm15,%xmm15
.Ltail_no_xor_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vpxor %xmm0,%xmm3,%xmm3
vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0
vpxor %xmm15,%xmm8,%xmm8
vpxor %xmm1,%xmm4,%xmm4
vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1
vpxor %xmm2,%xmm5,%xmm5
vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2
vmovdqu (%r10),%xmm12
vpxor %xmm0,%xmm3,%xmm10
vpxor %xmm1,%xmm4,%xmm11
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm10,%xmm5,%xmm5
vpxor %xmm11,%xmm5,%xmm5
vpslldq $8,%xmm5,%xmm9
vpsrldq $8,%xmm5,%xmm5
vpxor %xmm9,%xmm10,%xmm10
vpxor %xmm5,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9
vpalignr $8,%xmm10,%xmm10,%xmm10
vpxor %xmm11,%xmm10,%xmm10
vpxor %xmm9,%xmm10,%xmm10
cmpq $0,%rcx
jne .Lshort_avx
vpshufb %xmm13,%xmm10,%xmm10
vmovdqu %xmm10,(%rdi)
vzeroupper
ret
.cfi_endproc
.size gcm_ghash_avx,.-gcm_ghash_avx
.section .rodata
.align 64
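// Shared constants: .Lbswap_mask byte-reverses a block for GHASH's
// bit-reflected representation, .L0x1c2_polynomial holds the reduction
// constant of the GHASH field polynomial, and .L7_mask is used by the
// four-block reduction in gcm_ghash_clmul.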
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
.long 7,0,7,0
.align 64
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.text
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 12,170
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/vpaes-x86_64-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
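// The vpaes routines implement AES with SSSE3 byte shuffles (the
// .byte 102,...,56,0,... sequences are hand-encoded pshufb) instead of
// data-dependent table loads.  _vpaes_encrypt_core encrypts the block
// held in %xmm0 (result returned in %xmm0) using the key schedule at
// (%rdx); the round count is read from 240(%rdx), and %xmm9-%xmm15 hold
// constants preloaded by _vpaes_preheat.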
.type _vpaes_encrypt_core,@function
.align 16
_vpaes_encrypt_core:
.cfi_startproc
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa .Lk_ipt(%rip),%xmm2
pandn %xmm0,%xmm1
movdqu (%r9),%xmm5
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa .Lk_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
addq $16,%r9
pxor %xmm2,%xmm0
leaq .Lk_mc_backward(%rip),%r10
jmp .Lenc_entry
.align 16
.Lenc_loop:
movdqa %xmm13,%xmm4
movdqa %xmm12,%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
.Lenc_entry:
movdqa %xmm9,%xmm1
movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,232
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm10,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz .Lenc_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.cfi_endproc
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
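// _vpaes_encrypt_core_2x: the same transformation applied to two blocks
// at once (%xmm0 and %xmm6, with the high xmm registers as scratch for
// the second lane); used by the CTR-mode path below.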
.type _vpaes_encrypt_core_2x,@function
.align 16
_vpaes_encrypt_core_2x:
.cfi_startproc
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa .Lk_ipt(%rip),%xmm2
movdqa %xmm2,%xmm8
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
movdqu (%r9),%xmm5
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,208
.byte 102,68,15,56,0,198
movdqa .Lk_ipt+16(%rip),%xmm0
movdqa %xmm0,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,247
pxor %xmm5,%xmm2
pxor %xmm5,%xmm8
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
leaq .Lk_mc_backward(%rip),%r10
jmp .Lenc2x_entry
.align 16
.Lenc2x_loop:
movdqa .Lk_sb1(%rip),%xmm4
movdqa .Lk_sb1+16(%rip),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
movdqa .Lk_sb2(%rip),%xmm5
movdqa %xmm5,%xmm13
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
.byte 102,69,15,56,0,232
movdqa (%r11,%r10,1),%xmm4
movdqa .Lk_sb2+16(%rip),%xmm2
movdqa %xmm2,%xmm8
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm0,%xmm3
movdqa %xmm6,%xmm11
pxor %xmm5,%xmm2
pxor %xmm13,%xmm8
.byte 102,15,56,0,193
.byte 102,15,56,0,241
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
.byte 102,15,56,0,220
.byte 102,68,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
pxor %xmm6,%xmm11
.byte 102,15,56,0,193
.byte 102,15,56,0,241
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
pxor %xmm11,%xmm6
.Lenc2x_entry:
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa .Lk_inv+16(%rip),%xmm5
movdqa %xmm5,%xmm13
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,232
.byte 102,68,15,56,0,238
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm1,%xmm0
pxor %xmm7,%xmm6
.byte 102,15,56,0,217
.byte 102,68,15,56,0,223
movdqa %xmm10,%xmm4
movdqa %xmm10,%xmm12
pxor %xmm5,%xmm3
pxor %xmm13,%xmm11
.byte 102,15,56,0,224
.byte 102,68,15,56,0,230
movdqa %xmm10,%xmm2
movdqa %xmm10,%xmm8
pxor %xmm5,%xmm4
pxor %xmm13,%xmm12
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm0,%xmm2
pxor %xmm6,%xmm8
.byte 102,15,56,0,220
.byte 102,69,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
pxor %xmm7,%xmm11
jnz .Lenc2x_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
ret
.cfi_endproc
.size _vpaes_encrypt_core_2x,.-_vpaes_encrypt_core_2x
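// _vpaes_schedule_core: key-schedule driver.  It transforms the user key
// and repeatedly calls _vpaes_schedule_round/_vpaes_schedule_mangle to
// emit round keys; .Lschedule_128 and .Lschedule_256 are the 128-bit and
// 256-bit key paths.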
.type _vpaes_schedule_core,@function
.align 16
_vpaes_schedule_core:
.cfi_startproc
call _vpaes_preheat
movdqa .Lk_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
movdqa %xmm0,%xmm3
leaq .Lk_ipt(%rip),%r11
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
leaq .Lk_sr(%rip),%r10
movdqu %xmm0,(%rdx)
.Lschedule_go:
cmpl $192,%esi
ja .Lschedule_256
.Lschedule_128:
movl $10,%esi
.Loop_schedule_128:
call _vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
call _vpaes_schedule_mangle
jmp .Loop_schedule_128
.align 16
.Lschedule_256:
movdqu 16(%rdi),%xmm0
call _vpaes_schedule_transform
movl $7,%esi
.Loop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
call _vpaes_schedule_mangle
pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round
movdqa %xmm5,%xmm7
jmp .Loop_schedule_256
.align 16
.Lschedule_mangle_last:
leaq .Lk_deskew(%rip),%r11
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,193
leaq .Lk_opt(%rip),%r11
addq $32,%rdx
.Lschedule_mangle_last_dec:
addq $-16,%rdx
pxor .Lk_s63(%rip),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%rdx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.cfi_endproc
.size _vpaes_schedule_core,.-_vpaes_schedule_core
.type _vpaes_schedule_round,@function
.align 16
_vpaes_schedule_round:
.cfi_startproc
pxor %xmm1,%xmm1
.byte 102,65,15,58,15,200,15
.byte 102,69,15,58,15,192,15
pxor %xmm1,%xmm7
pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1
_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor .Lk_s63(%rip),%xmm7
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa %xmm13,%xmm4
.byte 102,15,56,0,226
movdqa %xmm12,%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.cfi_endproc
.size _vpaes_schedule_round,.-_vpaes_schedule_round
.type _vpaes_schedule_transform,@function
.align 16
_vpaes_schedule_transform:
.cfi_startproc
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa (%r11),%xmm2
.byte 102,15,56,0,208
movdqa 16(%r11),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.cfi_endproc
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
.type _vpaes_schedule_mangle,@function
.align 16
_vpaes_schedule_mangle:
.cfi_startproc
movdqa %xmm0,%xmm4
movdqa .Lk_mc_forward(%rip),%xmm5
addq $16,%rdx
pxor .Lk_s63(%rip),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.Lschedule_mangle_both:
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
addq $-16,%r8
andq $0x30,%r8
movdqu %xmm3,(%rdx)
ret
.cfi_endproc
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
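// vpaes_set_encrypt_key: public entry point.  It derives the rounds
// field from the key size in bits (%esi), stores it at 240(%rdx), and
// runs _vpaes_schedule_core to fill in the round keys.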
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,@function
.align 16
vpaes_set_encrypt_key:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
.hidden BORINGSSL_function_hit
movb $1,BORINGSSL_function_hit+5(%rip)
#endif
movl %esi,%eax
shrl $5,%eax
addl $5,%eax
movl %eax,240(%rdx)
movl $0,%ecx
movl $0x30,%r8d
call _vpaes_schedule_core
xorl %eax,%eax
ret
.cfi_endproc
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
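// vpaes_ctr32_encrypt_blocks: CTR-mode bulk encryption.  The 32-bit
// counter is byte-swapped via .Lrev_ctr so it can be incremented with
// paddd; an odd leading block goes through _vpaes_encrypt_core, then the
// main loop encrypts two counter blocks per iteration with
// _vpaes_encrypt_core_2x.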
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,@function
.align 16
vpaes_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
xchgq %rcx,%rdx
testq %rcx,%rcx
jz .Lctr32_abort
movdqu (%r8),%xmm0
movdqa .Lctr_add_one(%rip),%xmm8
subq %rdi,%rsi
call _vpaes_preheat
movdqa %xmm0,%xmm6
pshufb .Lrev_ctr(%rip),%xmm6
testq $1,%rcx
jz .Lctr32_prep_loop
movdqu (%rdi),%xmm7
call _vpaes_encrypt_core
pxor %xmm7,%xmm0
paddd %xmm8,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
subq $1,%rcx
leaq 16(%rdi),%rdi
jz .Lctr32_done
.Lctr32_prep_loop:
movdqa %xmm6,%xmm14
movdqa %xmm6,%xmm15
paddd %xmm8,%xmm15
.Lctr32_loop:
movdqa .Lrev_ctr(%rip),%xmm1
movdqa %xmm14,%xmm0
movdqa %xmm15,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
call _vpaes_encrypt_core_2x
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa .Lctr_add_two(%rip),%xmm3
pxor %xmm1,%xmm0
pxor %xmm2,%xmm6
paddd %xmm3,%xmm14
paddd %xmm3,%xmm15
movdqu %xmm0,(%rsi,%rdi,1)
movdqu %xmm6,16(%rsi,%rdi,1)
subq $2,%rcx
leaq 32(%rdi),%rdi
jnz .Lctr32_loop
.Lctr32_done:
.Lctr32_abort:
ret
.cfi_endproc
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
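// _vpaes_preheat: loads the .Lk_* lookup tables into %xmm9-%xmm15 so the
// encrypt and key-schedule cores can use them without reloading per round.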
.type _vpaes_preheat,@function
.align 16
_vpaes_preheat:
.cfi_startproc
leaq .Lk_s0F(%rip),%r10
movdqa -32(%r10),%xmm10
movdqa -16(%r10),%xmm11
movdqa 0(%r10),%xmm9
movdqa 48(%r10),%xmm13
movdqa 64(%r10),%xmm12
movdqa 80(%r10),%xmm15
movdqa 96(%r10),%xmm14
ret
.cfi_endproc
.size _vpaes_preheat,.-_vpaes_preheat
.type _vpaes_consts,@object
.section .rodata
.align 64
_vpaes_consts:
.Lk_inv:
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_s0F:
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
.Lk_ipt:
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sb1:
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.Lk_sb2:
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.Lk_sbo:
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_mc_forward:
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
.Lk_rcon:
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_s63:
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
.Lk_opt:
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.Lrev_ctr:
.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
.Lctr_add_one:
.quad 0x0000000000000000, 0x0000000100000000
.Lctr_add_two:
.quad 0x0000000000000000, 0x0000000200000000
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 64
.size _vpaes_consts,.-_vpaes_consts
.text
#endif
|
mktmansour/MKT-KSA-Geolocation-Security
| 49,007
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha512-armv8-win64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//                 SHA256-hw   SHA256(*)      SHA512
// Apple A7        1.97        10.5 (+33%)    6.73 (-1%(**))
// Cortex-A53      2.38        15.5 (+115%)   10.0 (+150%(***))
// Cortex-A57      2.31        11.6 (+86%)    7.51 (+260%(***))
// Denver          2.01        10.5 (+26%)    6.70 (+8%)
// X-Gene          -           20.0 (+100%)   12.8 (+300%(***))
// Mongoose        2.36        13.0 (+50%)    8.36 (+33%)
// Kryo            1.92        17.4 (+30%)    11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are an
//       indication of some compiler "pathology"; most notably, code
//       generated with -mgeneral-regs-only is significantly faster
//       and the gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
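// sha512_block_data_order_nohw: scalar SHA-512 compression.  x0 = state
// (eight 64-bit words), x1 = input, x2 = number of 128-byte blocks.
// Loop runs one block: the fully unrolled rounds 0-15 are followed by
// Loop_16_xx for the schedule-fed rounds 16-79.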
.globl sha512_block_data_order_nohw
.def sha512_block_data_order_nohw
.type 32
.endef
.align 6
sha512_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,LK512
add x30,x30,:lo12:LK512
stp x0,x2,[x29,#96]
Loop:
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
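// Rounds 16-79: each round below also computes the next message-schedule
// word X[i] = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16] on the
// fly (see the sigma0/sigma1 comments), with part of the 16-word window
// spilled to the four stack slots.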
Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section .rodata
.align 6
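// LK512: the 80 SHA-512 round constants K[0..79]; the zero quad at the
// end terminates the nohw round loop (cbnz x19,Loop_16_xx).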
LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
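// ASCII for: "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"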
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl sha512_block_data_order_hw
.def sha512_block_data_order_hw
.type 32
.endef
.align 6
sha512_block_data_order_hw:
	// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,LK512
add x3,x3,:lo12:LK512
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b Loop_hw
.align 4
Loop_hw:
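	// Each iteration compresses one 128-byte block. A sha512h/sha512h2
	// pair performs two SHA-512 rounds and sha512su0/sha512su1 extend the
	// message schedule; the .long directives are the raw instruction
	// encodings, kept for assemblers that lack the SHA-512 mnemonics.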
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
| mktmansour/MKT-KSA-Geolocation-Security | 4,135 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghashv8-armx-ios64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#if __ARM_MAX_ARCH__>=7
.text
.globl _gcm_init_clmul
.private_extern _gcm_init_clmul
.align 4
_gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
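	//precompute the "twisted" H (H<<1 mod P) and the Karatsuba-folded
	//powers H^2..H^4 that let the bulk GHASH code aggregate several
	//blocks between reductions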
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.globl _gcm_gmult_clmul
.private_extern _gcm_gmult_clmul
.align 4
_gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
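	//one GHASH multiplication, Xi = (Xi*H) mod P: three PMULLs
	//(Karatsuba) followed by the two-phase reduction against the
	//0xc2... constant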
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
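//ASCII for: "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"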
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
| mktmansour/MKT-KSA-Geolocation-Security | 12,622 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha-x86-elf.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl ChaCha20_ctr32_ssse3
.hidden ChaCha20_ctr32_ssse3
.type ChaCha20_ctr32_ssse3,@function
.align 16
ChaCha20_ctr32_ssse3:
.L_ChaCha20_ctr32_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call .Lpic_point
.Lpic_point:
popl %eax
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%ecx
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $524,%esp
andl $-64,%esp
movl %ebp,512(%esp)
leal .Lssse3_data-.Lpic_point(%eax),%eax
movdqu (%ebx),%xmm3
cmpl $256,%ecx
jb .L0001x
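// 256 bytes or more: run four ChaCha blocks in parallel. Each xmm
// register holds one state word across the four blocks; the per-lane
// counters come from the 0,1,2,3 row of .Lssse3_data.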
movl %edx,516(%esp)
movl %ebx,520(%esp)
subl $256,%ecx
leal 384(%esp),%ebp
movdqu (%edx),%xmm7
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
paddd 48(%eax),%xmm0
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
psubd 64(%eax),%xmm0
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,64(%ebp)
movdqa %xmm1,80(%ebp)
movdqa %xmm2,96(%ebp)
movdqa %xmm3,112(%ebp)
movdqu 16(%edx),%xmm3
movdqa %xmm4,-64(%ebp)
movdqa %xmm5,-48(%ebp)
movdqa %xmm6,-32(%ebp)
movdqa %xmm7,-16(%ebp)
movdqa 32(%eax),%xmm7
leal 128(%esp),%ebx
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,(%ebp)
movdqa %xmm1,16(%ebp)
movdqa %xmm2,32(%ebp)
movdqa %xmm3,48(%ebp)
movdqa %xmm4,-128(%ebp)
movdqa %xmm5,-112(%ebp)
movdqa %xmm6,-96(%ebp)
movdqa %xmm7,-80(%ebp)
leal 128(%esi),%esi
leal 128(%edi),%edi
jmp .L001outer_loop
.align 16
.L001outer_loop:
movdqa -112(%ebp),%xmm1
movdqa -96(%ebp),%xmm2
movdqa -80(%ebp),%xmm3
movdqa -48(%ebp),%xmm5
movdqa -32(%ebp),%xmm6
movdqa -16(%ebp),%xmm7
movdqa %xmm1,-112(%ebx)
movdqa %xmm2,-96(%ebx)
movdqa %xmm3,-80(%ebx)
movdqa %xmm5,-48(%ebx)
movdqa %xmm6,-32(%ebx)
movdqa %xmm7,-16(%ebx)
movdqa 32(%ebp),%xmm2
movdqa 48(%ebp),%xmm3
movdqa 64(%ebp),%xmm4
movdqa 80(%ebp),%xmm5
movdqa 96(%ebp),%xmm6
movdqa 112(%ebp),%xmm7
paddd 64(%eax),%xmm4
movdqa %xmm2,32(%ebx)
movdqa %xmm3,48(%ebx)
movdqa %xmm4,64(%ebx)
movdqa %xmm5,80(%ebx)
movdqa %xmm6,96(%ebx)
movdqa %xmm7,112(%ebx)
movdqa %xmm4,64(%ebp)
movdqa -128(%ebp),%xmm0
movdqa %xmm4,%xmm6
movdqa -64(%ebp),%xmm3
movdqa (%ebp),%xmm4
movdqa 16(%ebp),%xmm5
movl $10,%edx
nop
.align 16
.L002loop:
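// One double round (column round plus diagonal round) of the four
// interleaved states per iteration: rotates by 16 and 8 use pshufb with
// the .Lssse3_data masks, rotates by 12 and 7 use pslld/psrld/por.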
paddd %xmm3,%xmm0
movdqa %xmm3,%xmm2
pxor %xmm0,%xmm6
pshufb (%eax),%xmm6
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -48(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 80(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,64(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-64(%ebx)
paddd %xmm7,%xmm5
movdqa 32(%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -32(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 96(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,80(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,16(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-48(%ebx)
paddd %xmm6,%xmm4
movdqa 48(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -16(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 112(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,96(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-32(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa -48(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,%xmm6
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
pshufb (%eax),%xmm6
movdqa %xmm3,-16(%ebx)
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -32(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 64(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,112(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,32(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-48(%ebx)
paddd %xmm7,%xmm5
movdqa (%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -16(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 80(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,64(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,48(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-32(%ebx)
paddd %xmm6,%xmm4
movdqa 16(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -64(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 96(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,80(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-16(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 64(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,96(%ebx)
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
por %xmm1,%xmm3
decl %edx
jnz .L002loop
movdqa %xmm3,-64(%ebx)
movdqa %xmm4,(%ebx)
movdqa %xmm5,16(%ebx)
movdqa %xmm6,64(%ebx)
movdqa %xmm7,96(%ebx)
movdqa -112(%ebx),%xmm1
movdqa -96(%ebx),%xmm2
movdqa -80(%ebx),%xmm3
paddd -128(%ebp),%xmm0
paddd -112(%ebp),%xmm1
paddd -96(%ebp),%xmm2
paddd -80(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa -64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa -48(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa -32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa -16(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd -64(%ebp),%xmm0
paddd -48(%ebp),%xmm1
paddd -32(%ebp),%xmm2
paddd -16(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa (%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 16(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 48(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd (%ebp),%xmm0
paddd 16(%ebp),%xmm1
paddd 32(%ebp),%xmm2
paddd 48(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa 64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 80(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 96(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 112(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd 64(%ebp),%xmm0
paddd 80(%ebp),%xmm1
paddd 96(%ebp),%xmm2
paddd 112(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 208(%esi),%esi
pxor %xmm0,%xmm4
pxor %xmm1,%xmm5
pxor %xmm2,%xmm6
pxor %xmm3,%xmm7
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 208(%edi),%edi
subl $256,%ecx
jnc .L001outer_loop
addl $256,%ecx
jz .L003done
movl 520(%esp),%ebx
leal -128(%esi),%esi
movl 516(%esp),%edx
leal -128(%edi),%edi
movd 64(%ebp),%xmm2
movdqu (%ebx),%xmm3
paddd 96(%eax),%xmm2
pand 112(%eax),%xmm3
por %xmm2,%xmm3
.L0001x:
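// Single-block path: the state lives in xmm0..xmm3 ("expand 32-byte k"
// constants, low key half, high key half, counter/nonce), saved on the
// stack so it can be re-added after the rounds.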
movdqa 32(%eax),%xmm0
movdqu (%edx),%xmm1
movdqu 16(%edx),%xmm2
movdqa (%eax),%xmm6
movdqa 16(%eax),%xmm7
movl %ebp,48(%esp)
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
movl $10,%edx
jmp .L004loop1x
.align 16
.L005outer1x:
movdqa 80(%eax),%xmm3
movdqa (%esp),%xmm0
movdqa 16(%esp),%xmm1
movdqa 32(%esp),%xmm2
paddd 48(%esp),%xmm3
movl $10,%edx
movdqa %xmm3,48(%esp)
jmp .L004loop1x
.align 16
.L004loop1x:
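// The .byte sequences 102,15,56,0,222 and 102,15,56,0,223 encode
// pshufb %xmm6,%xmm3 and pshufb %xmm7,%xmm3 (the rotate-by-16 and
// rotate-by-8 shuffles), spelled out for pre-SSSE3 assemblers.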
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $57,%xmm1,%xmm1
pshufd $147,%xmm3,%xmm3
nop
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $147,%xmm1,%xmm1
pshufd $57,%xmm3,%xmm3
decl %edx
jnz .L004loop1x
paddd (%esp),%xmm0
paddd 16(%esp),%xmm1
paddd 32(%esp),%xmm2
paddd 48(%esp),%xmm3
cmpl $64,%ecx
jb .L006tail
movdqu (%esi),%xmm4
movdqu 16(%esi),%xmm5
pxor %xmm4,%xmm0
movdqu 32(%esi),%xmm4
pxor %xmm5,%xmm1
movdqu 48(%esi),%xmm5
pxor %xmm4,%xmm2
pxor %xmm5,%xmm3
leal 64(%esi),%esi
movdqu %xmm0,(%edi)
movdqu %xmm1,16(%edi)
movdqu %xmm2,32(%edi)
movdqu %xmm3,48(%edi)
leal 64(%edi),%edi
subl $64,%ecx
jnz .L005outer1x
jmp .L003done
.L006tail:
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
xorl %eax,%eax
xorl %edx,%edx
xorl %ebp,%ebp
.L007tail_loop:
movb (%esp,%ebp,1),%al
movb (%esi,%ebp,1),%dl
leal 1(%ebp),%ebp
xorb %dl,%al
movb %al,-1(%edi,%ebp,1)
decl %ecx
jnz .L007tail_loop
.L003done:
movl 512(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ctr32_ssse3,.-.L_ChaCha20_ctr32_ssse3_begin
.align 64
.Lssse3_data:
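// Rows: rotate-left-16 pshufb mask, rotate-left-8 pshufb mask, the
// "expand 32-byte k" constants, per-lane counter offsets 0..3, the
// 4-per-iteration counter increment, +1 and +4 counter adjustments,
// and a mask that clears the counter lane of the nonce block.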
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
.long 1634760805,857760878,2036477234,1797285236
.long 0,1,2,3
.long 4,4,4,4
.long 1,0,0,0
.long 4,0,0,0
.long 0,-1,-1,-1
.align 64
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
.byte 114,103,62,0
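// ASCII for: "ChaCha20 for x86, CRYPTOGAMS by <appro@openssl.org>"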
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
| mktmansour/MKT-KSA-Geolocation-Security | 11,024 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-neon-armv8-linux64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
.text
.globl gcm_init_neon
.hidden gcm_init_neon
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
.size gcm_init_neon,.-gcm_init_neon
.globl gcm_gmult_neon
.hidden gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, .Lmasks // load constants
add x9, x9, :lo12:.Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b .Lgmult_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.globl gcm_ghash_neon
.hidden gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, .Lmasks // load constants
add x9, x9, :lo12:.Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
.Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
.Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
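	// Two-phase reduction modulo the GHASH polynomial
	// x^128 + x^7 + x^2 + x + 1: in this bit-reflected representation
	// the x, x^2 and x^7 terms show up as the shift amounts 63/62/57
	// (left) and 1/2/7 (right) below.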
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne .Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.size gcm_ghash_neon,.-gcm_ghash_neon
.section .rodata
.align 4
.Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
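// ASCII for: "GHASH for ARMv8, derived from ARMv4 version by <appro@openssl.org>"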
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
| mktmansour/MKT-KSA-Geolocation-Security | 7,855 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesv8-armx-linux64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.section .rodata
.align 5
.Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl aes_hw_set_encrypt_key
.hidden aes_hw_set_encrypt_key
.type aes_hw_set_encrypt_key,%function
.align 5
aes_hw_set_encrypt_key:
.Lenc_key:
	// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-2
cmp w1,#128
b.lt .Lenc_key_abort
cmp w1,#256
b.gt .Lenc_key_abort
tst w1,#0x3f
b.ne .Lenc_key_abort
adrp x3,.Lrcon
add x3,x3,:lo12:.Lrcon
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt .Loop128
// 192-bit key support was removed.
b .L256
.align 4
.Loop128:
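	// One round key per iteration: tbl applies RotWord via the
	// rotate-n-splat mask, aese against the all-zero v0 performs
	// SubWord, v1 supplies the round constant (doubled each round by the
	// shl), and the ext/eor chain propagates the xor cascade across the
	// four words.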
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne .Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b .Ldone
// 192-bit key support was removed.
.align 4
.L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
.Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq .Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b .Loop256
.Ldone:
str w12,[x2]
mov x3,#0
.Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,%function
.align 5
aes_hw_ctr32_encrypt_blocks:
	// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240]
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
csel x12,xzr,x12,lo
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
	// This function uses a counter in one 32-bit lane. The mov-to-lane
	// lines could write to v1.16b and v18.16b directly, but that trips these bugs.
// We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls .Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b .Loop3x_ctr32
.align 4
.Loop3x_ctr32:
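	// Encrypt three counter blocks per iteration: the subs/b.gt inner
	// loop applies the middle round keys two at a time, and the code
	// after it finishes the last rounds while loading and xoring the
	// three input blocks and staging the next counters through v6.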
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt .Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
	// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs .Loop3x_ctr32
adds x2,x2,#3
b.eq .Lctr32_done
cmp x2,#1
mov x12,#16
csel x12,xzr,x12,eq
.Lctr32_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt .Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
cmp x2,#1
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
b.eq .Lctr32_done
st1 {v3.16b},[x1]
.Lctr32_done:
ldr x29,[sp],#16
ret
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
| mktmansour/MKT-KSA-Geolocation-Security | 49,242 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha512-armv8-linux64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,%function
.align 6
sha512_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,.LK512
add x30,x30,:lo12:.LK512
stp x0,x2,[x29,#96]
.Loop:
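	// Each iteration compresses one 128-byte block: the first 16 rounds
	// are unrolled below and the remaining rounds loop in groups of 16,
	// with x19 and x28 alternating between the next round constant and
	// the Maj() scratch value.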
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
.Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,.Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne .Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
.section .rodata
.align 6
.type .LK512,%object
.LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.size .LK512,.-.LK512
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
#ifndef __KERNEL__
.globl sha512_block_data_order_hw
.hidden sha512_block_data_order_hw
.type sha512_block_data_order_hw,%function
.align 6
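// SHA-512 ISA path (annotation): sha512h/sha512h2/sha512su0/sha512su1
// (Armv8.2 SHA512 extension) are emitted as raw .inst words, mnemonic in
// the trailing comment, so the file assembles on toolchains that predate
// the extension; each NEON register holds two 64-bit words, so every
// sha512h step covers two of the 80 rounds.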
sha512_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is never
// popped back, so the return address does not need re-signing; a BTI
// landing pad (AARCH64_VALID_CALL_TARGET) suffices.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,.LK512
add x3,x3,:lo12:.LK512
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b .Loop_hw
.align 4
.Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,.Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
.size sha512_block_data_order_hw,.-sha512_block_data_order_hw
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
mktmansour/MKT-KSA-Geolocation-Security
| 193,289
|
.cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha20_poly1305_x86_64-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.section .rodata
.align 64
chacha20_poly1305_constants:
.Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
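// pshufb masks implementing 32-bit left-rotates by 8 and 16 in each dword
// lane (rotation by a multiple of 8 bits is a pure byte permutation).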
.Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.Lrol16:
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.Lavx2_init:
.long 0,0,0,0
.Lsse_inc:
.long 1,0,0,0
.Lavx2_inc:
.long 2,0,0,0,2,0,0,0
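// Poly1305 r-clamp mask (RFC 8439, section 2.5): the first 16 bytes clear
// the top four bits of every 32-bit limb of r and the low two bits of the
// upper three limbs; the all-ones half leaves a value unchanged when used
// as a mask.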
.Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
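// Byte masks with 1..16 leading 0xff bytes, used to keep only the valid
// prefix of a final partial 16-byte block.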
.align 16
.Land_masks:
.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
.text
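// poly_hash_ad_internal (annotation): absorbs %r8 bytes of additional data
// at %rcx into the Poly1305 accumulator r10:r11:r12 (a value mod 2^130 - 5;
// r12 holds bits 128 and up). Each block performs, in our pseudo-C notation,
//
//   acc = (acc + block + 2^128) * r  mod  2^130 - 5
//
// with the clamped key r read from 0(%rbp)/8(%rbp); the excess above bit
// 130 is folded back times 5, computed as 4c + c via andq $-4 / shrdq $2.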
.type poly_hash_ad_internal,@function
.align 64
poly_hash_ad_internal:
.cfi_startproc
.cfi_def_cfa rsp, 8
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
cmpq $13,%r8
jne .Lhash_ad_loop
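// Fast path for exactly 13 bytes of AD (the TLS AAD length): two
// overlapping 8-byte loads cover bytes 0-7 and 5-12, shrq $24 discards the
// three overlapping bytes, and movq $1,%r12 sets the 2^128 pad bit.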
.Lpoly_fast_tls_ad:
movq (%rcx),%r10
movq 5(%rcx),%r11
shrq $24,%r11
movq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
ret
.Lhash_ad_loop:
cmpq $16,%r8
jb .Lhash_ad_tail
addq 0+0(%rcx),%r10
adcq 8+0(%rcx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rcx),%rcx
subq $16,%r8
jmp .Lhash_ad_loop
.Lhash_ad_tail:
cmpq $0,%r8
je .Lhash_ad_done
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
addq %r8,%rcx
.Lhash_ad_tail_loop:
shldq $8,%r13,%r14
shlq $8,%r13
movzbq -1(%rcx),%r15
xorq %r15,%r13
decq %rcx
decq %r8
jne .Lhash_ad_tail_loop
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lhash_ad_done:
ret
.cfi_endproc
.size poly_hash_ad_internal, .-poly_hash_ad_internal
.globl chacha20_poly1305_open_sse41
.hidden chacha20_poly1305_open_sse41
.type chacha20_poly1305_open_sse41,@function
.align 64
chacha20_poly1305_open_sse41:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
pushq %r9
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
subq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset 288 + 32
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
cmpq $128,%rbx
jbe .Lopen_sse_128
movdqa .Lchacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm7
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movq $10,%r10
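// Ten ChaCha20 double-rounds over one block. The .byte sequences are raw
// encodings of SSSE3 instructions: 102,15,58,15,... is palignr (the
// $4/$8/$12 diagonalize/undiagonalize lane rotations).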
.Lopen_sse_init_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jne .Lopen_sse_init_rounds
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
pand .Lclamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
.Lopen_sse_main_loop:
cmpq $256,%rbx
jb .Lopen_sse_tail
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd .Lsse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $4,%rcx
movq %rsi,%r8
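// Main open loop, 256 bytes per iteration: run ten ChaCha20 double-rounds
// across four blocks (state in xmm0-xmm15) while interleaving the sixteen
// Poly1305 block updates for the same 256 ciphertext bytes (%r8 walks
// %rsi), hiding the scalar multiply latency under the vector rounds.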
.Lopen_sse_main_loop_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
leaq 16(%r8),%r8
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %rcx
jge .Lopen_sse_main_loop_rounds
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
cmpq $-6,%rcx
jg .Lopen_sse_main_loop_rounds
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor 0+80(%rbp),%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp .Lopen_sse_main_loop
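/* Open tail: 0-255 bytes remain.  If nothing is left, finalize; otherwise
   pick the smallest batch (one to four ChaCha20 blocks) that covers the
   remainder. */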
.Lopen_sse_tail:
testq %rbx,%rbx
jz .Lopen_sse_finalize
cmpq $192,%rbx
ja .Lopen_sse_tail_256
cmpq $128,%rbx
ja .Lopen_sse_tail_192
cmpq $64,%rbx
ja .Lopen_sse_tail_128
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
cmpq $16,%rcx
jb .Lopen_sse_tail_64_rounds
.Lopen_sse_tail_64_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
.Lopen_sse_tail_64_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
cmpq $16,%rcx
jae .Lopen_sse_tail_64_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_sse_tail_64_rounds
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp .Lopen_sse_tail_64_dec_loop
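/* 65-128 bytes left: two ChaCha20 blocks, hashing one 16-byte ciphertext
   chunk per double round, then decrypt the first 64 bytes and fall through
   to the 16-byte-at-a-time drain loop for the rest. */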
.Lopen_sse_tail_128:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movq %rbx,%rcx
andq $-16,%rcx
xorq %r8,%r8
.Lopen_sse_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_sse_tail_128_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
cmpq %rcx,%r8
jb .Lopen_sse_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_sse_tail_128_rounds
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
subq $64,%rbx
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
jmp .Lopen_sse_tail_64_dec_loop
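/* 129-192 bytes left: three ChaCha20 blocks.  Poly1305 keeps pace with the
   ciphertext during the rounds and catches up on up to two extra 16-byte
   chunks (offsets 160 and 176) afterwards. */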
.Lopen_sse_tail_192:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movq %rbx,%rcx
movq $160,%r8
cmpq $160,%rcx
cmovgq %r8,%rcx
andq $-16,%rcx
xorq %r8,%r8
.Lopen_sse_tail_192_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_sse_tail_192_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
cmpq %rcx,%r8
jb .Lopen_sse_tail_192_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_sse_tail_192_rounds
cmpq $176,%rbx
jb .Lopen_sse_tail_192_finish
addq 0+160(%rsi),%r10
adcq 8+160(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
cmpq $192,%rbx
jb .Lopen_sse_tail_192_finish
addq 0+176(%rsi),%r10
adcq 8+176(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_sse_tail_192_finish:
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
subq $128,%rbx
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
jmp .Lopen_sse_tail_64_dec_loop
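/* 193-255 bytes left: a full batch of four ChaCha20 blocks, with Poly1305
   hashing of the remaining ciphertext interleaved into the rounds and
   finished in the catch-up loop below. */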
.Lopen_sse_tail_256:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd .Lsse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
xorq %r8,%r8
.Lopen_sse_tail_256_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
movdqa 0+80(%rbp),%xmm11
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
movdqa 0+80(%rbp),%xmm9
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
movdqa 0+80(%rbp),%xmm11
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb .Lrol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
movdqa 0+80(%rbp),%xmm9
addq $16,%r8
cmpq $160,%r8
jb .Lopen_sse_tail_256_rounds_and_x1hash
movq %rbx,%rcx
andq $-16,%rcx
.Lopen_sse_tail_256_hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%r8
cmpq %rcx,%r8
jb .Lopen_sse_tail_256_hash
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqa 0+80(%rbp),%xmm12
subq $192,%rbx
leaq 192(%rsi),%rsi
leaq 192(%rdi),%rdi
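/* Drain the last ChaCha20 block 16 bytes at a time: XOR with xmm0 and
   rotate xmm4 -> xmm0, xmm8 -> xmm4, xmm12 -> xmm8 until fewer than 16
   bytes remain. */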
.Lopen_sse_tail_64_dec_loop:
cmpq $16,%rbx
jb .Lopen_sse_tail_16_init
subq $16,%rbx
movdqu (%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
jmp .Lopen_sse_tail_64_dec_loop
.Lopen_sse_tail_16_init:
movdqa %xmm0,%xmm1
.Lopen_sse_tail_16:
testq %rbx,%rbx
jz .Lopen_sse_finalize
pxor %xmm3,%xmm3
leaq -1(%rsi,%rbx,1),%rsi
movq %rbx,%r8
.Lopen_sse_tail_16_compose:
pslldq $1,%xmm3
pinsrb $0,(%rsi),%xmm3
subq $1,%rsi
subq $1,%r8
jnz .Lopen_sse_tail_16_compose
.byte 102,73,15,126,221
pextrq $1,%xmm3,%r14
pxor %xmm1,%xmm3
.Lopen_sse_tail_16_extract:
pextrb $0,%xmm3,(%rdi)
psrldq $1,%xmm3
addq $1,%rdi
subq $1,%rbx
jne .Lopen_sse_tail_16_extract
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
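/* Finalize: hash the length block (AD length, ciphertext length) stored at
   32(%rbp), perform the final conditional subtraction of 2^130 - 5, add the
   second key half s from 16(%rbp), and write the 16-byte tag through the
   pointer saved at function entry (popped back into %r9). */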
.Lopen_sse_finalize:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
.cfi_remember_state
addq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset -(288 + 32)
popq %r9
.cfi_adjust_cfa_offset -8
.cfi_restore %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
ret
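/* Register-only open path for short inputs: three ChaCha20 blocks are kept
   entirely in xmm registers.  The counter-0 block is clamped into the
   Poly1305 key (r at 0(%rbp), s at 16(%rbp)); ciphertext is then hashed and
   decrypted 16 bytes at a time. */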
.Lopen_sse_128:
.cfi_restore_state
movdqu .Lchacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm13,%xmm15
movq $10,%r10
.Lopen_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz .Lopen_sse_128_rounds
paddd .Lchacha20_consts(%rip),%xmm0
paddd .Lchacha20_consts(%rip),%xmm1
paddd .Lchacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm9
paddd %xmm11,%xmm10
paddd %xmm15,%xmm13
paddd .Lsse_inc(%rip),%xmm15
paddd %xmm15,%xmm14
pand .Lclamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
.Lopen_sse_128_xor_hash:
cmpq $16,%rbx
jb .Lopen_sse_tail_16
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm1
movdqu %xmm1,0(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
movdqa %xmm2,%xmm13
movdqa %xmm6,%xmm2
movdqa %xmm10,%xmm6
movdqa %xmm14,%xmm10
jmp .Lopen_sse_128_xor_hash
.size chacha20_poly1305_open_sse41, .-chacha20_poly1305_open_sse41
.cfi_endproc
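/* Seal (encrypt-and-authenticate) entry point.  The prologue saves the
   callee-saved registers and %r9 (reloaded later to write the tag),
   reserves aligned stack scratch, and records the AD length plus the total
   length to be authenticated (the plaintext length plus what appears to be
   an extra-input length at 56(%r9)) for the final length block; inputs of
   at most 128 bytes take the register-only .Lseal_sse_128 path. */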
.globl chacha20_poly1305_seal_sse41
.hidden chacha20_poly1305_seal_sse41
.type chacha20_poly1305_seal_sse41,@function
.align 64
chacha20_poly1305_seal_sse41:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
pushq %r9
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
subq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset 288 + 32
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
cmpq $128,%rbx
jbe .Lseal_sse_128
movdqa .Lchacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,%xmm14
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $10,%r10
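/* First batch of four ChaCha20 blocks (10 double rounds).  The counter-0
   block (the xmm3/xmm7 group) is clamped into the Poly1305 key; the other
   three encrypt the first (up to) 192 bytes of plaintext. */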
.Lseal_sse_init_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jnz .Lseal_sse_init_rounds
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
pand .Lclamp(%rip),%xmm3
movdqa %xmm3,0+0(%rbp)
movdqa %xmm7,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
cmpq $192,%rbx
ja .Lseal_sse_main_init
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
jmp .Lseal_sse_128_tail_hash
.Lseal_sse_main_init:
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 128(%rdi)
movdqu %xmm4,16 + 128(%rdi)
movdqu %xmm8,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
movq $2,%rcx
movq $8,%r8
cmpq $64,%rbx
jbe .Lseal_sse_tail_64
cmpq $128,%rbx
jbe .Lseal_sse_tail_128
cmpq $192,%rbx
jbe .Lseal_sse_tail_192
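/* Main seal loop: 256 bytes per iteration.  Four fresh ChaCha20 blocks are
   computed while Poly1305 hashes the ciphertext already written at (%rdi);
   %r8 and %rcx control how many hash steps are interleaved with the rounds. */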
.Lseal_sse_main_loop:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd .Lsse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
.align 32
.Lseal_sse_main_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa .Lrol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa .Lrol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
leaq 16(%rdi),%rdi
decq %r8
jge .Lseal_sse_main_rounds
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_main_rounds
paddd .Lchacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm14,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm14
pxor %xmm3,%xmm14
movdqu %xmm14,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm14
pxor %xmm7,%xmm14
movdqu %xmm14,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm14
pxor %xmm11,%xmm14
movdqu %xmm14,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm14
pxor %xmm15,%xmm14
movdqu %xmm14,48 + 0(%rdi)
movdqa 0+80(%rbp),%xmm14
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
cmpq $256,%rbx
ja .Lseal_sse_main_loop_xor
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
jmp .Lseal_sse_128_tail_hash
.Lseal_sse_main_loop_xor:
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
subq $256,%rbx
movq $6,%rcx
movq $4,%r8
cmpq $192,%rbx
jg .Lseal_sse_main_loop
movq %rbx,%rcx
testq %rbx,%rbx
je .Lseal_sse_128_tail_hash
movq $6,%rcx
cmpq $128,%rbx
ja .Lseal_sse_tail_192
cmpq $64,%rbx
ja .Lseal_sse_tail_128
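/* 1-64 bytes of plaintext left: one more ChaCha20 block, still interleaving
   Poly1305 over the ciphertext that is already in the output buffer. */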
.Lseal_sse_tail_64:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
.Lseal_sse_tail_64_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_sse_tail_64_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_tail_64_rounds_and_x2hash
decq %r8
jge .Lseal_sse_tail_64_rounds_and_x1hash
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp .Lseal_sse_128_tail_xor
.Lseal_sse_tail_128:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
.Lseal_sse_tail_128_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_sse_tail_128_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_tail_128_rounds_and_x2hash
decq %r8
jge .Lseal_sse_tail_128_rounds_and_x1hash
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movq $64,%rcx
subq $64,%rbx
leaq 64(%rsi),%rsi
jmp .Lseal_sse_128_tail_hash
.Lseal_sse_tail_192:
movdqa .Lchacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd .Lsse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
.Lseal_sse_tail_192_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_sse_tail_192_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
leaq 16(%rdi),%rdi
decq %rcx
jg .Lseal_sse_tail_192_rounds_and_x2hash
decq %r8
jge .Lseal_sse_tail_192_rounds_and_x1hash
paddd .Lchacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd .Lchacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd .Lchacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
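/* Catch up Poly1305 over any ciphertext written above that has not been
   hashed yet (%rcx bytes), 16 bytes per pass, before XORing out the last
   keystream blocks. */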
.Lseal_sse_128_tail_hash:
cmpq $16,%rcx
jb .Lseal_sse_128_tail_xor
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
leaq 16(%rdi),%rdi
jmp .Lseal_sse_128_tail_hash
.Lseal_sse_128_tail_xor:
cmpq $16,%rbx
jb .Lseal_sse_tail_16
subq $16,%rbx
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,0(%rdi)
addq 0(%rdi),%r10
adcq 8(%rdi),%r11
adcq $1,%r12
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
movdqa %xmm1,%xmm12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
jmp .Lseal_sse_128_tail_xor
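/* Fewer than 16 plaintext bytes remain: gather them into xmm15, encrypt and
   write them byte by byte, then complete the partial Poly1305 block,
   borrowing bytes from the extra input (pointer/length apparently kept at
   48(%r9)/56(%r9)) when some is available. */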
.Lseal_sse_tail_16:
testq %rbx,%rbx
jz .Lprocess_blocks_of_extra_in
movq %rbx,%r8
movq %rbx,%rcx
leaq -1(%rsi,%rbx,1),%rsi
pxor %xmm15,%xmm15
.Lseal_sse_tail_16_compose:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
decq %rcx
jne .Lseal_sse_tail_16_compose
pxor %xmm0,%xmm15
movq %rbx,%rcx
movdqu %xmm15,%xmm0
.Lseal_sse_tail_16_extract:
pextrb $0,%xmm0,(%rdi)
psrldq $1,%xmm0
addq $1,%rdi
subq $1,%rcx
jnz .Lseal_sse_tail_16_extract
movq 288 + 0 + 32(%rsp),%r9
movq 56(%r9),%r14
movq 48(%r9),%r13
testq %r14,%r14
jz .Lprocess_partial_block
movq $16,%r15
subq %rbx,%r15
cmpq %r15,%r14
jge .Lload_extra_in
movq %r14,%r15
.Lload_extra_in:
leaq -1(%r13,%r15,1),%rsi
addq %r15,%r13
subq %r15,%r14
movq %r13,48(%r9)
movq %r14,56(%r9)
addq %r15,%r8
pxor %xmm11,%xmm11
.Lload_extra_load_loop:
pslldq $1,%xmm11
pinsrb $0,(%rsi),%xmm11
leaq -1(%rsi),%rsi
subq $1,%r15
jnz .Lload_extra_load_loop
movq %rbx,%r15
.Lload_extra_shift_loop:
pslldq $1,%xmm11
subq $1,%r15
jnz .Lload_extra_shift_loop
leaq .Land_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
por %xmm11,%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
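/* Hash whatever extra input remains: full 16-byte blocks in the loop below,
   then any trailing bytes are gathered, masked and hashed as a final
   partial block. */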
.Lprocess_blocks_of_extra_in:
movq 288+32+0(%rsp),%r9
movq 48(%r9),%rsi
movq 56(%r9),%r8
movq %r8,%rcx
shrq $4,%r8
.Lprocess_extra_hash_loop:
jz .Lprocess_extra_in_trailer
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rsi),%rsi
subq $1,%r8
jmp .Lprocess_extra_hash_loop
process_extra_in_trailer:
andq $15,%rcx
movq %rcx,%rbx
jz .Ldo_length_block
leaq -1(%rsi,%rcx,1),%rsi
.Lprocess_extra_in_trailer_load:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
subq $1,%rcx
jnz .Lprocess_extra_in_trailer_load
.Lprocess_partial_block:
leaq .Land_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
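/* Final Poly1305 block over the encoded lengths saved at 32(%rbp), a
   conditional subtraction of 2^130 - 5 (the subq/sbbq/cmovc sequence), the
   addition of the second key half s from 16(%rbp), and the 16-byte tag store
   through the pointer restored by popq %r9. */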
.Ldo_length_block:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
.cfi_remember_state
addq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset -(288 + 32)
popq %r9
.cfi_adjust_cfa_offset -8
.cfi_restore %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
ret
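/* Short seal path (small inputs): three ChaCha20 states in plain SSE
   registers; the state carrying the unincremented counter (%xmm2/%xmm6) has
   its first 16 bytes clamped and is stored at 0(%rbp)/16(%rbp) as the
   one-time Poly1305 key (r, s). */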
.Lseal_sse_128:
.cfi_restore_state
movdqu .Lchacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm14
movdqa %xmm14,%xmm12
paddd .Lsse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd .Lsse_inc(%rip),%xmm13
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
movq $10,%r10
.Lseal_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb .Lrol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb .Lrol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb .Lrol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz .Lseal_sse_128_rounds
paddd .Lchacha20_consts(%rip),%xmm0
paddd .Lchacha20_consts(%rip),%xmm1
paddd .Lchacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm8
paddd %xmm11,%xmm9
paddd %xmm15,%xmm12
paddd .Lsse_inc(%rip),%xmm15
paddd %xmm15,%xmm13
pand .Lclamp(%rip),%xmm2
movdqa %xmm2,0+0(%rbp)
movdqa %xmm6,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
jmp .Lseal_sse_128_tail_xor
.size chacha20_poly1305_seal_sse41, .-chacha20_poly1305_seal_sse41
.cfi_endproc
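/* ChaCha20-Poly1305 open (decrypt) with AVX2: derive the Poly1305 key from
   the first ChaCha20 block, hash the AD via poly_hash_ad_internal, hash and
   decrypt an initial 64-byte block, then run the 512-byte main loop below. */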
.globl chacha20_poly1305_open_avx2
.hidden chacha20_poly1305_open_avx2
.type chacha20_poly1305_open_avx2,@function
.align 64
chacha20_poly1305_open_avx2:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
pushq %r9
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
subq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset 288 + 32
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
vzeroupper
vmovdqa .Lchacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd .Lavx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe .Lopen_avx2_192
cmpq $320,%rbx
jbe .Lopen_avx2_320
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%r10
.Lopen_avx2_init_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
decq %r10
jne .Lopen_avx2_init_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
.Lopen_avx2_init_hash:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%rcx
cmpq $64,%rcx
jne .Lopen_avx2_init_hash
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vmovdqu %ymm0,0(%rdi)
vmovdqu %ymm4,32(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
subq $64,%rbx
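/* Main open loop: eight 64-byte ChaCha20 blocks (512 bytes) per iteration,
   held across %ymm0-%ymm15 plus spills at 64..256(%rbp), with Poly1305
   processing of the ciphertext interleaved between the vector instructions. */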
.Lopen_avx2_main_loop:
cmpq $512,%rbx
jb .Lopen_avx2_main_loop_done
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
.Lopen_avx2_main_loop_rounds:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rsi,%rcx,1),%r10
adcq 8+16(%rsi,%rcx,1),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rsi,%rcx,1),%r10
adcq 8+32(%rsi,%rcx,1),%r11
adcq $1,%r12
leaq 48(%rcx),%rcx
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
cmpq $60*8,%rcx
jne .Lopen_avx2_main_loop_rounds
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+60*8(%rsi),%r10
adcq 8+60*8(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
addq 0+60*8+16(%rsi),%r10
adcq 8+60*8+16(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
leaq 512(%rdi),%rdi
subq $512,%rbx
jmp .Lopen_avx2_main_loop
.Lopen_avx2_main_loop_done:
testq %rbx,%rbx
vzeroupper
je .Lopen_sse_finalize
cmpq $384,%rbx
ja .Lopen_avx2_tail_512
cmpq $256,%rbx
ja .Lopen_avx2_tail_384
cmpq $128,%rbx
ja .Lopen_avx2_tail_256
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
andq $-16,%rcx
testq %rcx,%rcx
je .Lopen_avx2_tail_128_rounds
.Lopen_avx2_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lopen_avx2_tail_128_rounds:
addq $16,%r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb .Lopen_avx2_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne .Lopen_avx2_tail_128_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp .Lopen_avx2_tail_128_xor
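/* 129-256 bytes left: two interleaved ChaCha20 states, hashing the remaining
   ciphertext while the rounds run, then the shared 128-byte XOR tail. */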
.Lopen_avx2_tail_256:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $128,%rcx
shrq $4,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
.Lopen_avx2_tail_256_rounds_and_x1hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
.Lopen_avx2_tail_256_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
incq %r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
cmpq %rcx,%r8
jb .Lopen_avx2_tail_256_rounds_and_x1hash
cmpq $10,%r8
jne .Lopen_avx2_tail_256_rounds
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
.Lopen_avx2_tail_256_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg .Lopen_avx2_tail_256_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp .Lopen_avx2_tail_256_hash
.Lopen_avx2_tail_256_done:
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
subq $128,%rbx
jmp .Lopen_avx2_tail_128_xor
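/* 257-384 bytes left: same idea with three interleaved ChaCha20 states. */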
.Lopen_avx2_tail_384:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $256,%rcx
shrq $4,%rcx
addq $6,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
.Lopen_avx2_tail_384_rounds_and_x2hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
.Lopen_avx2_tail_384_rounds_and_x1hash:
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
incq %r8
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb .Lopen_avx2_tail_384_rounds_and_x2hash
cmpq $10,%r8
jne .Lopen_avx2_tail_384_rounds_and_x1hash
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
.Lopen_avx2_384_tail_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg .Lopen_avx2_384_tail_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp .Lopen_avx2_384_tail_hash
.Lopen_avx2_384_tail_done:
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp .Lopen_avx2_tail_128_xor
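/* 385-511 bytes left: the full four-state round function, hashing ciphertext
   blocks between rounds, before falling into the shared XOR tail. */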
.Lopen_avx2_tail_512:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
movq %rsi,%r8
.Lopen_avx2_tail_512_rounds_and_x2hash:
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
.Lopen_avx2_tail_512_rounds_and_x1hash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+16(%r8),%r10
adcq 8+16(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%r8),%r8
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
incq %rcx
cmpq $4,%rcx
jl .Lopen_avx2_tail_512_rounds_and_x2hash
cmpq $10,%rcx
jne .Lopen_avx2_tail_512_rounds_and_x1hash
movq %rbx,%rcx
subq $384,%rcx
andq $-16,%rcx
.Lopen_avx2_tail_512_hash:
testq %rcx,%rcx
je .Lopen_avx2_tail_512_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
subq $16,%rcx
jmp .Lopen_avx2_tail_512_hash
.Lopen_avx2_tail_512_done:
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 384(%rsi),%rsi
leaq 384(%rdi),%rdi
subq $384,%rbx
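/* Shared exit: XOR the queued keystream in %ymm0 (then %ymm4, %ymm8, %ymm12)
   against the remaining ciphertext 32 bytes at a time, then 16, and leave any
   final partial block to the SSE byte-wise tail handler. */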
.Lopen_avx2_tail_128_xor:
cmpq $32,%rbx
jb .Lopen_avx2_tail_32_xor
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
jmp .Lopen_avx2_tail_128_xor
.Lopen_avx2_tail_32_xor:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb .Lopen_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm1
vmovdqu %xmm1,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vperm2i128 $0x11,%ymm0,%ymm0,%ymm0
vmovdqa %xmm0,%xmm1
.Lopen_avx2_exit:
vzeroupper
jmp .Lopen_sse_tail_16
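/* Short open path for inputs of at most 192 bytes: two ChaCha20 states, the
   Poly1305 key taken from the first block, then the common short
   hash-and-XOR loop at .Lopen_avx2_short. */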
.Lopen_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
.Lopen_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne .Lopen_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
.Lopen_avx2_short:
movq %r8,%r8
call poly_hash_ad_internal
.Lopen_avx2_short_hash_and_xor_loop:
cmpq $32,%rbx
jb .Lopen_avx2_short_tail_32
subq $32,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rsi),%r10
adcq 8+16(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp .Lopen_avx2_short_hash_and_xor_loop
.Lopen_avx2_short_tail_32:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb .Lopen_avx2_short_tail_32_exit
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm1
.Lopen_avx2_short_tail_32_exit:
vzeroupper
jmp .Lopen_sse_tail_16
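/* Short open path for 193-320 byte inputs: three ChaCha20 states feeding the
   same .Lopen_avx2_short loop. */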
.Lopen_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vpaddd .Lavx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
.Lopen_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne .Lopen_avx2_320_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp .Lopen_avx2_short
.size chacha20_poly1305_open_avx2, .-chacha20_poly1305_open_avx2
.cfi_endproc
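/* ChaCha20-Poly1305 seal (encrypt) with AVX2.  The length recorded for the
   final Poly1305 length block adds the value at 56(%r9) to the plaintext
   length, which appears to account for extra_in data hashed after the
   ciphertext. */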
.globl chacha20_poly1305_seal_avx2
.hidden chacha20_poly1305_seal_avx2
.type chacha20_poly1305_seal_avx2,@function
.align 64
chacha20_poly1305_seal_avx2:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
pushq %r9
.cfi_adjust_cfa_offset 8
.cfi_offset %r9,-64
subq $288 + 0 + 32,%rsp
.cfi_adjust_cfa_offset 288 + 32
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
vzeroupper
vmovdqa .Lchacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd .Lavx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe .Lseal_avx2_192
cmpq $320,%rbx
jbe .Lseal_avx2_320
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm4,%ymm7
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vmovdqa %ymm8,%ymm11
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,%ymm15
vpaddd .Lavx2_inc(%rip),%ymm15,%ymm14
vpaddd .Lavx2_inc(%rip),%ymm14,%ymm13
vpaddd .Lavx2_inc(%rip),%ymm13,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm15,0+256(%rbp)
movq $10,%r10
.Lseal_avx2_init_rounds:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %r10
jnz .Lseal_avx2_init_rounds
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vperm2i128 $0x02,%ymm3,%ymm7,%ymm15
vperm2i128 $0x13,%ymm3,%ymm7,%ymm3
vpand .Lclamp(%rip),%ymm15,%ymm15
vmovdqa %ymm15,0+0(%rbp)
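/* The clamped Poly1305 key is now cached at 0(%rbp); hash the
   additional data (length in %r8) before any ciphertext. The
   "movq %r8,%r8" below appears to be a harmless no-op left by the
   generator's register mapping. */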
movq %r8,%r8
call poly_hash_ad_internal
vpxor 0(%rsi),%ymm3,%ymm3
vpxor 32(%rsi),%ymm11,%ymm11
vmovdqu %ymm3,0(%rdi)
vmovdqu %ymm11,32(%rdi)
vperm2i128 $0x02,%ymm2,%ymm6,%ymm15
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+64(%rsi),%ymm15,%ymm15
vpxor 32+64(%rsi),%ymm2,%ymm2
vpxor 64+64(%rsi),%ymm6,%ymm6
vpxor 96+64(%rsi),%ymm10,%ymm10
vmovdqu %ymm15,0+64(%rdi)
vmovdqu %ymm2,32+64(%rdi)
vmovdqu %ymm6,64+64(%rdi)
vmovdqu %ymm10,96+64(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm15
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+192(%rsi),%ymm15,%ymm15
vpxor 32+192(%rsi),%ymm1,%ymm1
vpxor 64+192(%rsi),%ymm5,%ymm5
vpxor 96+192(%rsi),%ymm9,%ymm9
vmovdqu %ymm15,0+192(%rdi)
vmovdqu %ymm1,32+192(%rdi)
vmovdqu %ymm5,64+192(%rdi)
vmovdqu %ymm9,96+192(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm15
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm15,%ymm8
leaq 320(%rsi),%rsi
subq $320,%rbx
movq $320,%rcx
cmpq $128,%rbx
jbe .Lseal_avx2_short_hash_remainder
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vpxor 64(%rsi),%ymm8,%ymm8
vpxor 96(%rsi),%ymm12,%ymm12
vmovdqu %ymm0,320(%rdi)
vmovdqu %ymm4,352(%rdi)
vmovdqu %ymm8,384(%rdi)
vmovdqu %ymm12,416(%rdi)
leaq 128(%rsi),%rsi
subq $128,%rbx
movq $8,%rcx
movq $2,%r8
cmpq $128,%rbx
jbe .Lseal_avx2_tail_128
cmpq $256,%rbx
jbe .Lseal_avx2_tail_256
cmpq $384,%rbx
jbe .Lseal_avx2_tail_384
cmpq $512,%rbx
jbe .Lseal_avx2_tail_512
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
subq $16,%rdi
movq $9,%rcx
jmp .Lseal_avx2_main_loop_rounds_entry
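/* Main seal loop: each iteration generates 8 ChaCha20 blocks (512
   bytes) of keystream while Poly1305-hashing the ciphertext written
   on the previous iteration -- 48 bytes per rounds iteration, plus 32
   bytes after the rounds loop, covering the full 512. */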
.align 32
.Lseal_avx2_main_loop:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%rcx
.align 32
.Lseal_avx2_main_loop_rounds:
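/* ChaCha20 vector ops interleaved with Poly1305: each 16-byte message
   block is added into the accumulator (%r10:%r11:%r12), multiplied by
   r (held at 0(%rbp)/8(%rbp)) with mulx, then reduced mod 2^130-5 via
   the and/shrd sequence below. */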
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
.Lseal_avx2_main_loop_rounds_entry:
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rdi),%r10
adcq 8+32(%rdi),%r11
adcq $1,%r12
leaq 48(%rdi),%rdi
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %rcx
jne .Lseal_avx2_main_loop_rounds
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
subq $512,%rbx
cmpq $512,%rbx
jg .Lseal_avx2_main_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
movq $10,%rcx
xorq %r8,%r8
cmpq $384,%rbx
ja .Lseal_avx2_tail_512
cmpq $256,%rbx
ja .Lseal_avx2_tail_384
cmpq $128,%rbx
ja .Lseal_avx2_tail_256
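/* Tail paths: at most 128/256/384/512 bytes remain. Each variant runs
   just enough parallel ChaCha20 states for the remainder while
   finishing the Poly1305 hash of already-written ciphertext. This one
   handles up to 128 bytes with a single 2-block state. */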
.Lseal_avx2_tail_128:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
.Lseal_avx2_tail_128_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_128_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_128_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_128_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp .Lseal_avx2_short_loop
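/* 129-256 byte tail: two interleaved 2-block ChaCha20 states. */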
.Lseal_avx2_tail_256:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
.Lseal_avx2_tail_256_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_256_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_256_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_256_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $128,%rcx
leaq 128(%rsi),%rsi
subq $128,%rbx
jmp .Lseal_avx2_short_hash_remainder
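/* 257-384 byte tail: three interleaved 2-block ChaCha20 states. */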
.Lseal_avx2_tail_384:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
.Lseal_avx2_tail_384_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_384_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_384_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_384_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $256,%rcx
leaq 256(%rsi),%rsi
subq $256,%rbx
jmp .Lseal_avx2_short_hash_remainder
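/* 385-512 byte tail: the full four-state (8-block) ChaCha20 kernel,
   with the mulx-based Poly1305 folded into the rounds as in the main
   loop. */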
.Lseal_avx2_tail_512:
vmovdqa .Lchacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa .Lavx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
.Lseal_avx2_tail_512_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
.Lseal_avx2_tail_512_rounds_and_2xhash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
addq %rax,%r15
adcq %rdx,%r9
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa .Lrol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa .Lrol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg .Lseal_avx2_tail_512_rounds_and_3xhash
decq %r8
jge .Lseal_avx2_tail_512_rounds_and_2xhash
vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $384,%rcx
leaq 384(%rsi),%rsi
subq $384,%rbx
jmp .Lseal_avx2_short_hash_remainder
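/* Inputs of at most 320 bytes: three 2-block states give 384 bytes of
   keystream; the first 64-byte block supplies the Poly1305 key,
   leaving 320 bytes for encryption. */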
.Lseal_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vpaddd .Lavx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
.Lseal_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb .Lrol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne .Lseal_avx2_320_rounds
vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0
vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1
vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp .Lseal_avx2_short
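/* Inputs of at most 192 bytes: two 2-block states give 256 bytes of
   keystream; block 0 supplies the Poly1305 key, leaving 192 bytes. */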
.Lseal_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
.Lseal_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb .Lrol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb .Lrol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne .Lseal_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand .Lclamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
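/* Common short path: hash the AAD, then Poly1305 any ciphertext
   already written (%rcx bytes, zero when entered directly) before
   encrypting what remains. */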
.Lseal_avx2_short:
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
.Lseal_avx2_short_hash_remainder:
cmpq $16,%rcx
jb .Lseal_avx2_short_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
addq $16,%rdi
jmp .Lseal_avx2_short_hash_remainder
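/* Encrypt the remaining plaintext 32 bytes at a time with the saved
   keystream registers, hashing each ciphertext block, then rotate the
   keystream register queue (ymm0 <- ymm4 <- ymm8 <- ymm12 <- ...). */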
.Lseal_avx2_short_loop:
cmpq $32,%rbx
jb .Lseal_avx2_short_tail
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp .Lseal_avx2_short_loop
.Lseal_avx2_short_tail:
cmpq $16,%rbx
jb .Lseal_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm0
.Lseal_avx2_exit:
vzeroupper
jmp .Lseal_sse_tail_16
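# vzeroupper avoids AVX-to-SSE transition stalls before jumping into the
# SSE path, which finishes any sub-16-byte tail and computes the tag.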
.cfi_endproc
.size chacha20_poly1305_seal_avx2, .-chacha20_poly1305_seal_avx2
#endif
| mktmansour/MKT-KSA-Geolocation-Security | 34,022 | .cargo-home/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha256-armv8-ios64.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
//               SHA256-hw    SHA256(*)      SHA512
// Apple A7      1.97         10.5 (+33%)    6.73 (-1%(**))
// Cortex-A53    2.38         15.5 (+115%)   10.0 (+150%(***))
// Cortex-A57    2.31         11.6 (+86%)    7.51 (+260%(***))
// Denver        2.01         10.5 (+26%)    6.70 (+8%)
// X-Gene                     20.0 (+100%)   12.8 (+300%(***))
// Mongoose      2.36         13.0 (+50%)    8.36 (+33%)
// Kryo          1.92         17.4 (+30%)    11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
#endif
.text
.globl _sha256_block_data_order_nohw
.private_extern _sha256_block_data_order_nohw
.align 6
_sha256_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,LK256@PAGE
add x30,x30,LK256@PAGEOFF
stp x0,x2,[x29,#96]
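// Frame layout: callee-saved registers above, plus a 16-byte scratch
// area (sub sp,sp,#4*4) used to spill four of the sixteen message
// schedule words once the later rounds run short of registers.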
Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
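// Each scalar round below computes (FIPS 180-4):
//   T1 = h + Sigma1(e) + Ch(e,f,g) + K[t] + W[t]
//   T2 = Sigma0(a) + Maj(a,b,c)
//   (a,b,c,d,e,f,g,h) <- (T1+T2, a, b, c, d+T1, e, f, g)
// with Sigma0(x) = ROTR2^ROTR13^ROTR22, Sigma1(x) = ROTR6^ROTR11^ROTR25,
// Ch(x,y,z) = (x&y)^(~x&z), and Maj folded across rounds via
// Maj(a,b,c) = b ^ ((a^b) & (b^c)), as the inline comments note.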
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
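// Loop_16_xx extends the message schedule on the fly (FIPS 180-4):
//   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
//   sigma0(x) = ROTR7^ROTR18^SHR3,  sigma1(x) = ROTR17^ROTR19^SHR10
// interleaved with the rounds; the loop exits once the zero terminator
// after K256 is loaded into w19.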
Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section __TEXT,__const
.align 6
LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.byte	83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 // "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl _sha256_block_data_order_hw
.private_extern _sha256_block_data_order_hw
.align 6
_sha256_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,LK256@PAGE
add x3,x3,LK256@PAGEOFF
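// The SHA-2 instructions below are emitted as raw opcodes (.long ...)
// with the intended mnemonic in a comment, presumably so the file
// assembles even with toolchains that lack the sha256h/sha256su*
// mnemonics.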
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
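// v0/v1 hold the eight state words as two 4-lane vectors ({a,b,c,d} and
// {e,f,g,h}); v18/v19 keep a copy for the feed-forward addition at the
// end of each 64-byte block, just before Loop_hw repeats.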
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
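// Hedged usage note: this pregenerated file is internal to the `ring`
// crate; Rust callers reach it through ring's public digest API, e.g.:
//
//     use ring::digest;
//     let d = digest::digest(&digest::SHA256, b"hello world");
//     let out: &[u8] = d.as_ref(); // 32-byte SHA-256 output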
|