// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86_64/crypto/cipher_extra/aesni-sha256-x86_64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.extern OPENSSL_ia32cap_P
.hidden OPENSSL_ia32cap_P
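// aesni_cbc_sha256_enc is the dispatcher for the stitched AES-CBC + SHA-256
// routine.  A NULL first argument returns immediately (with %eax = 1) as a
// probe; otherwise control transfers to the SHAEXT, XOP, AVX2, or AVX variant
// according to the OPENSSL_ia32cap_P feature bits tested below.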
.globl aesni_cbc_sha256_enc
.hidden aesni_cbc_sha256_enc
.type aesni_cbc_sha256_enc,@function
.align 16
aesni_cbc_sha256_enc:
.cfi_startproc
leaq OPENSSL_ia32cap_P(%rip),%r11
movl $1,%eax
cmpq $0,%rdi
je .Lprobe
movl 0(%r11),%eax
movq 4(%r11),%r10
btq $61,%r10
jc aesni_cbc_sha256_enc_shaext
movq %r10,%r11
shrq $32,%r11
testl $2048,%r10d
jnz aesni_cbc_sha256_enc_xop
andl $296,%r11d
cmpl $296,%r11d
je aesni_cbc_sha256_enc_avx2
andl $268435456,%r10d
jnz aesni_cbc_sha256_enc_avx
ud2
xorl %eax,%eax
cmpq $0,%rdi
je .Lprobe
ud2
.Lprobe:
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc
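// K256 holds the 64 SHA-256 round constants; each group of four is stored
// twice so the AVX2 path can apply the same constants to both 128-bit lanes
// of a ymm register.  The table is followed by the byte-swap shuffle mask
// used with vpshufb (at K256+512), masks indexed by the AES key length that
// select the correct vaesenclast output (at K256+544), and the CRYPTOGAMS
// "AESNI-CBC+SHA256 stitch" identification string.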
.section .rodata
.align 64
.type K256,@object
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0,0,0,0, 0,0,0,0, -1,-1,-1,-1
.long 0,0,0,0, 0,0,0,0
.byte 65,69,83,78,73,45,67,66,67,43,83,72,65,50,53,54,32,115,116,105,116,99,104,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.align 64
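// XOP variant: SHA-256 block hashing interleaved with AES-CBC encryption.
// Four 16-byte AES blocks are encrypted (vaesenc) per 64-byte SHA-256 block;
// the .byte 143,232,120,194,... sequences are hand-encoded XOP vprotd rotates
// used in the message-schedule sigma computations.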
.type aesni_cbc_sha256_enc_xop,@function
.align 64
aesni_cbc_sha256_enc_xop:
.cfi_startproc
.Lxop_shortcut:
movq 8(%rsp),%r10
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
subq $128,%rsp
andq $-64,%rsp
shlq $6,%rdx
subq %rdi,%rsi
subq %rdi,%r10
addq %rdi,%rdx
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %r8,64+32(%rsp)
movq %r9,64+40(%rsp)
movq %r10,64+48(%rsp)
movq %rax,120(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
.Lprologue_xop:
vzeroall
movq %rdi,%r12
leaq 128(%rcx),%rdi
leaq K256+544(%rip),%r13
movl 240-128(%rdi),%r14d
movq %r9,%r15
movq %r10,%rsi
vmovdqu (%r8),%xmm8
subq $9,%r14
movl 0(%r15),%eax
movl 4(%r15),%ebx
movl 8(%r15),%ecx
movl 12(%r15),%edx
movl 16(%r15),%r8d
movl 20(%r15),%r9d
movl 24(%r15),%r10d
movl 28(%r15),%r11d
vmovdqa 0(%r13,%r14,8),%xmm14
vmovdqa 16(%r13,%r14,8),%xmm13
vmovdqa 32(%r13,%r14,8),%xmm12
vmovdqu 0-128(%rdi),%xmm10
jmp .Lloop_xop
.align 16
.Lloop_xop:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi,%r12,1),%xmm0
vmovdqu 16(%rsi,%r12,1),%xmm1
vmovdqu 32(%rsi,%r12,1),%xmm2
vmovdqu 48(%rsi,%r12,1),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%esi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%esi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lxop_00_47
.align 16
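// Rounds 0-47: each pass through this loop performs 16 SHA-256 rounds,
// computes the next 16 words of the message schedule, and encrypts one
// 16-byte AES-CBC block via the interleaved vaesenc instructions.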
.Lxop_00_47:
subq $-32*4,%rbp
vmovdqu (%r12),%xmm9
movq %r12,64+0(%rsp)
vpalignr $4,%xmm0,%xmm1,%xmm4
rorl $14,%r13d
movl %r14d,%eax
vpalignr $4,%xmm2,%xmm3,%xmm7
movl %r9d,%r12d
xorl %r8d,%r13d
.byte 143,232,120,194,236,14
rorl $9,%r14d
xorl %r10d,%r12d
vpsrld $3,%xmm4,%xmm4
rorl $5,%r13d
xorl %eax,%r14d
vpaddd %xmm7,%xmm0,%xmm0
andl %r8d,%r12d
vpxor %xmm10,%xmm9,%xmm9
vmovdqu 16-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
.byte 143,232,120,194,245,11
rorl $11,%r14d
xorl %r10d,%r12d
vpxor %xmm5,%xmm4,%xmm4
xorl %ebx,%r15d
rorl $6,%r13d
addl %r12d,%r11d
andl %r15d,%esi
.byte 143,232,120,194,251,13
xorl %eax,%r14d
addl %r13d,%r11d
vpxor %xmm6,%xmm4,%xmm4
xorl %ebx,%esi
addl %r11d,%edx
vpsrld $10,%xmm3,%xmm6
rorl $2,%r14d
addl %esi,%r11d
vpaddd %xmm4,%xmm0,%xmm0
movl %edx,%r13d
addl %r11d,%r14d
.byte 143,232,120,194,239,2
rorl $14,%r13d
movl %r14d,%r11d
vpxor %xmm6,%xmm7,%xmm7
movl %r8d,%r12d
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%r12d
vpxor %xmm5,%xmm7,%xmm7
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
vpxor %xmm8,%xmm9,%xmm9
xorl %edx,%r13d
vpsrldq $8,%xmm7,%xmm7
addl 4(%rsp),%r10d
movl %r11d,%esi
rorl $11,%r14d
xorl %r9d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %eax,%esi
rorl $6,%r13d
addl %r12d,%r10d
andl %esi,%r15d
.byte 143,232,120,194,248,13
xorl %r11d,%r14d
addl %r13d,%r10d
vpsrld $10,%xmm0,%xmm6
xorl %eax,%r15d
addl %r10d,%ecx
.byte 143,232,120,194,239,2
rorl $2,%r14d
addl %r15d,%r10d
vpxor %xmm6,%xmm7,%xmm7
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
vpxor %xmm5,%xmm7,%xmm7
movl %edx,%r12d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r12d
vpslldq $8,%xmm7,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 32-128(%rdi),%xmm10
xorl %ecx,%r13d
vpaddd %xmm7,%xmm0,%xmm0
addl 8(%rsp),%r9d
movl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %r11d,%r15d
rorl $6,%r13d
addl %r12d,%r9d
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%esi
addl %r9d,%ebx
rorl $2,%r14d
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 48-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%esi
rorl $11,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
rorl $6,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
rorl $2,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
rorl $14,%r13d
movl %r14d,%r8d
vpalignr $4,%xmm3,%xmm0,%xmm7
movl %ebx,%r12d
xorl %eax,%r13d
.byte 143,232,120,194,236,14
rorl $9,%r14d
xorl %ecx,%r12d
vpsrld $3,%xmm4,%xmm4
rorl $5,%r13d
xorl %r8d,%r14d
vpaddd %xmm7,%xmm1,%xmm1
andl %eax,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm10
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
.byte 143,232,120,194,245,11
rorl $11,%r14d
xorl %ecx,%r12d
vpxor %xmm5,%xmm4,%xmm4
xorl %r9d,%r15d
rorl $6,%r13d
addl %r12d,%edx
andl %r15d,%esi
.byte 143,232,120,194,248,13
xorl %r8d,%r14d
addl %r13d,%edx
vpxor %xmm6,%xmm4,%xmm4
xorl %r9d,%esi
addl %edx,%r11d
vpsrld $10,%xmm0,%xmm6
rorl $2,%r14d
addl %esi,%edx
vpaddd %xmm4,%xmm1,%xmm1
movl %r11d,%r13d
addl %edx,%r14d
.byte 143,232,120,194,239,2
rorl $14,%r13d
movl %r14d,%edx
vpxor %xmm6,%xmm7,%xmm7
movl %eax,%r12d
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%r12d
vpxor %xmm5,%xmm7,%xmm7
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 80-128(%rdi),%xmm10
xorl %r11d,%r13d
vpsrldq $8,%xmm7,%xmm7
addl 20(%rsp),%ecx
movl %edx,%esi
rorl $11,%r14d
xorl %ebx,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %r8d,%esi
rorl $6,%r13d
addl %r12d,%ecx
andl %esi,%r15d
.byte 143,232,120,194,249,13
xorl %edx,%r14d
addl %r13d,%ecx
vpsrld $10,%xmm1,%xmm6
xorl %r8d,%r15d
addl %ecx,%r10d
.byte 143,232,120,194,239,2
rorl $2,%r14d
addl %r15d,%ecx
vpxor %xmm6,%xmm7,%xmm7
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
vpxor %xmm5,%xmm7,%xmm7
movl %r11d,%r12d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r12d
vpslldq $8,%xmm7,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 96-128(%rdi),%xmm10
xorl %r10d,%r13d
vpaddd %xmm7,%xmm1,%xmm1
addl 24(%rsp),%ebx
movl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %edx,%r15d
rorl $6,%r13d
addl %r12d,%ebx
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%esi
addl %ebx,%r9d
rorl $2,%r14d
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 112-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%esi
rorl $11,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
rorl $6,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
rorl $2,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
rorl $14,%r13d
movl %r14d,%eax
vpalignr $4,%xmm0,%xmm1,%xmm7
movl %r9d,%r12d
xorl %r8d,%r13d
.byte 143,232,120,194,236,14
rorl $9,%r14d
xorl %r10d,%r12d
vpsrld $3,%xmm4,%xmm4
rorl $5,%r13d
xorl %eax,%r14d
vpaddd %xmm7,%xmm2,%xmm2
andl %r8d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 128-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
.byte 143,232,120,194,245,11
rorl $11,%r14d
xorl %r10d,%r12d
vpxor %xmm5,%xmm4,%xmm4
xorl %ebx,%r15d
rorl $6,%r13d
addl %r12d,%r11d
andl %r15d,%esi
.byte 143,232,120,194,249,13
xorl %eax,%r14d
addl %r13d,%r11d
vpxor %xmm6,%xmm4,%xmm4
xorl %ebx,%esi
addl %r11d,%edx
vpsrld $10,%xmm1,%xmm6
rorl $2,%r14d
addl %esi,%r11d
vpaddd %xmm4,%xmm2,%xmm2
movl %edx,%r13d
addl %r11d,%r14d
.byte 143,232,120,194,239,2
rorl $14,%r13d
movl %r14d,%r11d
vpxor %xmm6,%xmm7,%xmm7
movl %r8d,%r12d
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%r12d
vpxor %xmm5,%xmm7,%xmm7
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 144-128(%rdi),%xmm10
xorl %edx,%r13d
vpsrldq $8,%xmm7,%xmm7
addl 36(%rsp),%r10d
movl %r11d,%esi
rorl $11,%r14d
xorl %r9d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %eax,%esi
rorl $6,%r13d
addl %r12d,%r10d
andl %esi,%r15d
.byte 143,232,120,194,250,13
xorl %r11d,%r14d
addl %r13d,%r10d
vpsrld $10,%xmm2,%xmm6
xorl %eax,%r15d
addl %r10d,%ecx
.byte 143,232,120,194,239,2
rorl $2,%r14d
addl %r15d,%r10d
vpxor %xmm6,%xmm7,%xmm7
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
vpxor %xmm5,%xmm7,%xmm7
movl %edx,%r12d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r12d
vpslldq $8,%xmm7,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm10
xorl %ecx,%r13d
vpaddd %xmm7,%xmm2,%xmm2
addl 40(%rsp),%r9d
movl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %r11d,%r15d
rorl $6,%r13d
addl %r12d,%r9d
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%esi
addl %r9d,%ebx
rorl $2,%r14d
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 176-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%esi
rorl $11,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
rorl $6,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
rorl $2,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
rorl $14,%r13d
movl %r14d,%r8d
vpalignr $4,%xmm1,%xmm2,%xmm7
movl %ebx,%r12d
xorl %eax,%r13d
.byte 143,232,120,194,236,14
rorl $9,%r14d
xorl %ecx,%r12d
vpsrld $3,%xmm4,%xmm4
rorl $5,%r13d
xorl %r8d,%r14d
vpaddd %xmm7,%xmm3,%xmm3
andl %eax,%r12d
vpand %xmm12,%xmm11,%xmm8
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 192-128(%rdi),%xmm10
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
.byte 143,232,120,194,245,11
rorl $11,%r14d
xorl %ecx,%r12d
vpxor %xmm5,%xmm4,%xmm4
xorl %r9d,%r15d
rorl $6,%r13d
addl %r12d,%edx
andl %r15d,%esi
.byte 143,232,120,194,250,13
xorl %r8d,%r14d
addl %r13d,%edx
vpxor %xmm6,%xmm4,%xmm4
xorl %r9d,%esi
addl %edx,%r11d
vpsrld $10,%xmm2,%xmm6
rorl $2,%r14d
addl %esi,%edx
vpaddd %xmm4,%xmm3,%xmm3
movl %r11d,%r13d
addl %edx,%r14d
.byte 143,232,120,194,239,2
rorl $14,%r13d
movl %r14d,%edx
vpxor %xmm6,%xmm7,%xmm7
movl %eax,%r12d
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%r12d
vpxor %xmm5,%xmm7,%xmm7
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 208-128(%rdi),%xmm10
xorl %r11d,%r13d
vpsrldq $8,%xmm7,%xmm7
addl 52(%rsp),%ecx
movl %edx,%esi
rorl $11,%r14d
xorl %ebx,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %r8d,%esi
rorl $6,%r13d
addl %r12d,%ecx
andl %esi,%r15d
.byte 143,232,120,194,251,13
xorl %edx,%r14d
addl %r13d,%ecx
vpsrld $10,%xmm3,%xmm6
xorl %r8d,%r15d
addl %ecx,%r10d
.byte 143,232,120,194,239,2
rorl $2,%r14d
addl %r15d,%ecx
vpxor %xmm6,%xmm7,%xmm7
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
vpxor %xmm5,%xmm7,%xmm7
movl %r11d,%r12d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r12d
vpslldq $8,%xmm7,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
vpand %xmm13,%xmm11,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 224-128(%rdi),%xmm10
xorl %r10d,%r13d
vpaddd %xmm7,%xmm3,%xmm3
addl 56(%rsp),%ebx
movl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %edx,%r15d
rorl $6,%r13d
addl %r12d,%ebx
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%esi
addl %ebx,%r9d
rorl $2,%r14d
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpor %xmm11,%xmm8,%xmm8
vaesenclast %xmm10,%xmm9,%xmm11
vmovdqu 0-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%esi
rorl $11,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
rorl $6,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
rorl $2,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
movq 64+0(%rsp),%r12
vpand %xmm14,%xmm11,%xmm11
movq 64+8(%rsp),%r15
vpor %xmm11,%xmm8,%xmm8
vmovdqu %xmm8,(%r15,%r12,1)
leaq 16(%r12),%r12
cmpb $0,131(%rbp)
jne .Lxop_00_47
vmovdqu (%r12),%xmm9
movq %r12,64+0(%rsp)
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpxor %xmm10,%xmm9,%xmm9
vmovdqu 16-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r12d
xorl %ebx,%r15d
rorl $6,%r13d
addl %r12d,%r11d
andl %r15d,%esi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%esi
addl %r11d,%edx
rorl $2,%r14d
addl %esi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
vpxor %xmm8,%xmm9,%xmm9
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%esi
rorl $11,%r14d
xorl %r9d,%r12d
xorl %eax,%esi
rorl $6,%r13d
addl %r12d,%r10d
andl %esi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
addl %r10d,%ecx
rorl $2,%r14d
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 32-128(%rdi),%xmm10
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r12d
xorl %r11d,%r15d
rorl $6,%r13d
addl %r12d,%r9d
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%esi
addl %r9d,%ebx
rorl $2,%r14d
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 48-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%esi
rorl $11,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
rorl $6,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
rorl $2,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm10
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r12d
xorl %r9d,%r15d
rorl $6,%r13d
addl %r12d,%edx
andl %r15d,%esi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%esi
addl %edx,%r11d
rorl $2,%r14d
addl %esi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 80-128(%rdi),%xmm10
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%esi
rorl $11,%r14d
xorl %ebx,%r12d
xorl %r8d,%esi
rorl $6,%r13d
addl %r12d,%ecx
andl %esi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
addl %ecx,%r10d
rorl $2,%r14d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 96-128(%rdi),%xmm10
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r12d
xorl %edx,%r15d
rorl $6,%r13d
addl %r12d,%ebx
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%esi
addl %ebx,%r9d
rorl $2,%r14d
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 112-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%esi
rorl $11,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
rorl $6,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
rorl $2,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 128-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r12d
xorl %ebx,%r15d
rorl $6,%r13d
addl %r12d,%r11d
andl %r15d,%esi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%esi
addl %r11d,%edx
rorl $2,%r14d
addl %esi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 144-128(%rdi),%xmm10
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%esi
rorl $11,%r14d
xorl %r9d,%r12d
xorl %eax,%esi
rorl $6,%r13d
addl %r12d,%r10d
andl %esi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
addl %r10d,%ecx
rorl $2,%r14d
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm10
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r12d
xorl %r11d,%r15d
rorl $6,%r13d
addl %r12d,%r9d
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%esi
addl %r9d,%ebx
rorl $2,%r14d
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 176-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%esi
rorl $11,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
rorl $6,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
rorl $2,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpand %xmm12,%xmm11,%xmm8
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 192-128(%rdi),%xmm10
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r12d
xorl %r9d,%r15d
rorl $6,%r13d
addl %r12d,%edx
andl %r15d,%esi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%esi
addl %edx,%r11d
rorl $2,%r14d
addl %esi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 208-128(%rdi),%xmm10
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%esi
rorl $11,%r14d
xorl %ebx,%r12d
xorl %r8d,%esi
rorl $6,%r13d
addl %r12d,%ecx
andl %esi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
addl %ecx,%r10d
rorl $2,%r14d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
vpand %xmm13,%xmm11,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 224-128(%rdi),%xmm10
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r12d
xorl %edx,%r15d
rorl $6,%r13d
addl %r12d,%ebx
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%esi
addl %ebx,%r9d
rorl $2,%r14d
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpor %xmm11,%xmm8,%xmm8
vaesenclast %xmm10,%xmm9,%xmm11
vmovdqu 0-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%esi
rorl $11,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
rorl $6,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
rorl $2,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%r12
movq 64+8(%rsp),%r13
movq 64+40(%rsp),%r15
movq 64+48(%rsp),%rsi
vpand %xmm14,%xmm11,%xmm11
movl %r14d,%eax
vpor %xmm11,%xmm8,%xmm8
vmovdqu %xmm8,(%r12,%r13,1)
leaq 16(%r12),%r12
addl 0(%r15),%eax
addl 4(%r15),%ebx
addl 8(%r15),%ecx
addl 12(%r15),%edx
addl 16(%r15),%r8d
addl 20(%r15),%r9d
addl 24(%r15),%r10d
addl 28(%r15),%r11d
cmpq 64+16(%rsp),%r12
movl %eax,0(%r15)
movl %ebx,4(%r15)
movl %ecx,8(%r15)
movl %edx,12(%r15)
movl %r8d,16(%r15)
movl %r9d,20(%r15)
movl %r10d,24(%r15)
movl %r11d,28(%r15)
jb .Lloop_xop
movq 64+32(%rsp),%r8
movq 120(%rsp),%rsi
.cfi_def_cfa %rsi,8
vmovdqu %xmm8,(%r8)
vzeroall
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_xop:
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha256_enc_xop,.-aesni_cbc_sha256_enc_xop
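// AVX variant: same stitched structure as the XOP code above, but the scalar
// SHA-256 rotates use shrd (a register double-shifted with itself) and the
// message schedule uses plain AVX shift/xor sequences instead of XOP vprotd.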
.type aesni_cbc_sha256_enc_avx,@function
.align 64
aesni_cbc_sha256_enc_avx:
.cfi_startproc
.Lavx_shortcut:
movq 8(%rsp),%r10
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
subq $128,%rsp
andq $-64,%rsp
shlq $6,%rdx
subq %rdi,%rsi
subq %rdi,%r10
addq %rdi,%rdx
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %r8,64+32(%rsp)
movq %r9,64+40(%rsp)
movq %r10,64+48(%rsp)
movq %rax,120(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
.Lprologue_avx:
vzeroall
movq %rdi,%r12
leaq 128(%rcx),%rdi
leaq K256+544(%rip),%r13
movl 240-128(%rdi),%r14d
movq %r9,%r15
movq %r10,%rsi
vmovdqu (%r8),%xmm8
subq $9,%r14
movl 0(%r15),%eax
movl 4(%r15),%ebx
movl 8(%r15),%ecx
movl 12(%r15),%edx
movl 16(%r15),%r8d
movl 20(%r15),%r9d
movl 24(%r15),%r10d
movl 28(%r15),%r11d
vmovdqa 0(%r13,%r14,8),%xmm14
vmovdqa 16(%r13,%r14,8),%xmm13
vmovdqa 32(%r13,%r14,8),%xmm12
vmovdqu 0-128(%rdi),%xmm10
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi,%r12,1),%xmm0
vmovdqu 16(%rsi,%r12,1),%xmm1
vmovdqu 32(%rsi,%r12,1),%xmm2
vmovdqu 48(%rsi,%r12,1),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%esi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%esi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
subq $-32*4,%rbp
vmovdqu (%r12),%xmm9
movq %r12,64+0(%rsp)
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
xorl %r8d,%r13d
shrdl $9,%r14d,%r14d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
vpxor %xmm10,%xmm9,%xmm9
vmovdqu 16-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
shrdl $11,%r14d,%r14d
xorl %r10d,%r12d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
shrdl $6,%r13d,%r13d
addl %r12d,%r11d
andl %r15d,%esi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%esi
vpshufd $250,%xmm3,%xmm7
addl %r11d,%edx
shrdl $2,%r14d,%r14d
addl %esi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
xorl %edx,%r13d
vpslld $11,%xmm5,%xmm5
shrdl $9,%r14d,%r14d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
vpxor %xmm8,%xmm9,%xmm9
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%esi
shrdl $11,%r14d,%r14d
vpxor %xmm5,%xmm4,%xmm4
xorl %r9d,%r12d
xorl %eax,%esi
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
addl %r12d,%r10d
andl %esi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
addl %r10d,%ecx
vpxor %xmm7,%xmm6,%xmm6
shrdl $2,%r14d,%r14d
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
xorl %ecx,%r13d
shrdl $9,%r14d,%r14d
vpshufd $132,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpsrldq $8,%xmm6,%xmm6
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 32-128(%rdi),%xmm10
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpaddd %xmm6,%xmm0,%xmm0
movl %r10d,%r15d
shrdl $11,%r14d,%r14d
xorl %r8d,%r12d
vpshufd $80,%xmm0,%xmm7
xorl %r11d,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%r9d
vpsrld $10,%xmm7,%xmm6
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
vpsrlq $17,%xmm7,%xmm7
xorl %r11d,%esi
addl %r9d,%ebx
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpsrlq $2,%xmm7,%xmm7
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpxor %xmm7,%xmm6,%xmm6
xorl %ebx,%r13d
shrdl $9,%r14d,%r14d
xorl %edx,%r12d
vpshufd $232,%xmm6,%xmm6
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpslldq $8,%xmm6,%xmm6
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 48-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%esi
vpaddd %xmm6,%xmm0,%xmm0
shrdl $11,%r14d,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
vpaddd 0(%rbp),%xmm0,%xmm6
shrdl $6,%r13d,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
shrdl $2,%r14d,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
xorl %eax,%r13d
shrdl $9,%r14d,%r14d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm10
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
shrdl $11,%r14d,%r14d
xorl %ecx,%r12d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
shrdl $6,%r13d,%r13d
addl %r12d,%edx
andl %r15d,%esi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%esi
vpshufd $250,%xmm0,%xmm7
addl %edx,%r11d
shrdl $2,%r14d,%r14d
addl %esi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
xorl %r11d,%r13d
vpslld $11,%xmm5,%xmm5
shrdl $9,%r14d,%r14d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 80-128(%rdi),%xmm10
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%esi
shrdl $11,%r14d,%r14d
vpxor %xmm5,%xmm4,%xmm4
xorl %ebx,%r12d
xorl %r8d,%esi
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
addl %r12d,%ecx
andl %esi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
addl %ecx,%r10d
vpxor %xmm7,%xmm6,%xmm6
shrdl $2,%r14d,%r14d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
xorl %r10d,%r13d
shrdl $9,%r14d,%r14d
vpshufd $132,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpsrldq $8,%xmm6,%xmm6
andl %r10d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 96-128(%rdi),%xmm10
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpaddd %xmm6,%xmm1,%xmm1
movl %ecx,%r15d
shrdl $11,%r14d,%r14d
xorl %eax,%r12d
vpshufd $80,%xmm1,%xmm7
xorl %edx,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%ebx
vpsrld $10,%xmm7,%xmm6
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
vpsrlq $17,%xmm7,%xmm7
xorl %edx,%esi
addl %ebx,%r9d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpsrlq $2,%xmm7,%xmm7
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpxor %xmm7,%xmm6,%xmm6
xorl %r9d,%r13d
shrdl $9,%r14d,%r14d
xorl %r11d,%r12d
vpshufd $232,%xmm6,%xmm6
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpslldq $8,%xmm6,%xmm6
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 112-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%esi
vpaddd %xmm6,%xmm1,%xmm1
shrdl $11,%r14d,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
vpaddd 32(%rbp),%xmm1,%xmm6
shrdl $6,%r13d,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
shrdl $2,%r14d,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
xorl %r8d,%r13d
shrdl $9,%r14d,%r14d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 128-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
shrdl $11,%r14d,%r14d
xorl %r10d,%r12d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
shrdl $6,%r13d,%r13d
addl %r12d,%r11d
andl %r15d,%esi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%esi
vpshufd $250,%xmm1,%xmm7
addl %r11d,%edx
shrdl $2,%r14d,%r14d
addl %esi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
xorl %edx,%r13d
vpslld $11,%xmm5,%xmm5
shrdl $9,%r14d,%r14d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 144-128(%rdi),%xmm10
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%esi
shrdl $11,%r14d,%r14d
vpxor %xmm5,%xmm4,%xmm4
xorl %r9d,%r12d
xorl %eax,%esi
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
addl %r12d,%r10d
andl %esi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
addl %r10d,%ecx
vpxor %xmm7,%xmm6,%xmm6
shrdl $2,%r14d,%r14d
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
xorl %ecx,%r13d
shrdl $9,%r14d,%r14d
vpshufd $132,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpsrldq $8,%xmm6,%xmm6
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm10
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpaddd %xmm6,%xmm2,%xmm2
movl %r10d,%r15d
shrdl $11,%r14d,%r14d
xorl %r8d,%r12d
vpshufd $80,%xmm2,%xmm7
xorl %r11d,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%r9d
vpsrld $10,%xmm7,%xmm6
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
vpsrlq $17,%xmm7,%xmm7
xorl %r11d,%esi
addl %r9d,%ebx
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpsrlq $2,%xmm7,%xmm7
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpxor %xmm7,%xmm6,%xmm6
xorl %ebx,%r13d
shrdl $9,%r14d,%r14d
xorl %edx,%r12d
vpshufd $232,%xmm6,%xmm6
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpslldq $8,%xmm6,%xmm6
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 176-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%esi
vpaddd %xmm6,%xmm2,%xmm2
shrdl $11,%r14d,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
vpaddd 64(%rbp),%xmm2,%xmm6
shrdl $6,%r13d,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
shrdl $2,%r14d,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
xorl %eax,%r13d
shrdl $9,%r14d,%r14d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
vpand %xmm12,%xmm11,%xmm8
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 192-128(%rdi),%xmm10
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
shrdl $11,%r14d,%r14d
xorl %ecx,%r12d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
shrdl $6,%r13d,%r13d
addl %r12d,%edx
andl %r15d,%esi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%esi
vpshufd $250,%xmm2,%xmm7
addl %edx,%r11d
shrdl $2,%r14d,%r14d
addl %esi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
xorl %r11d,%r13d
vpslld $11,%xmm5,%xmm5
shrdl $9,%r14d,%r14d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 208-128(%rdi),%xmm10
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%esi
shrdl $11,%r14d,%r14d
vpxor %xmm5,%xmm4,%xmm4
xorl %ebx,%r12d
xorl %r8d,%esi
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
addl %r12d,%ecx
andl %esi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
addl %ecx,%r10d
vpxor %xmm7,%xmm6,%xmm6
shrdl $2,%r14d,%r14d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
xorl %r10d,%r13d
shrdl $9,%r14d,%r14d
vpshufd $132,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpsrldq $8,%xmm6,%xmm6
andl %r10d,%r12d
vpand %xmm13,%xmm11,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 224-128(%rdi),%xmm10
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpaddd %xmm6,%xmm3,%xmm3
movl %ecx,%r15d
shrdl $11,%r14d,%r14d
xorl %eax,%r12d
vpshufd $80,%xmm3,%xmm7
xorl %edx,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%ebx
vpsrld $10,%xmm7,%xmm6
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
vpsrlq $17,%xmm7,%xmm7
xorl %edx,%esi
addl %ebx,%r9d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpsrlq $2,%xmm7,%xmm7
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpxor %xmm7,%xmm6,%xmm6
xorl %r9d,%r13d
shrdl $9,%r14d,%r14d
xorl %r11d,%r12d
vpshufd $232,%xmm6,%xmm6
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpslldq $8,%xmm6,%xmm6
vpor %xmm11,%xmm8,%xmm8
vaesenclast %xmm10,%xmm9,%xmm11
vmovdqu 0-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%esi
vpaddd %xmm6,%xmm3,%xmm3
shrdl $11,%r14d,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
vpaddd 96(%rbp),%xmm3,%xmm6
shrdl $6,%r13d,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
shrdl $2,%r14d,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
movq 64+0(%rsp),%r12
vpand %xmm14,%xmm11,%xmm11
movq 64+8(%rsp),%r15
vpor %xmm11,%xmm8,%xmm8
vmovdqu %xmm8,(%r15,%r12,1)
leaq 16(%r12),%r12
cmpb $0,131(%rbp)
jne .Lavx_00_47
vmovdqu (%r12),%xmm9
movq %r12,64+0(%rsp)
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
xorl %r8d,%r13d
shrdl $9,%r14d,%r14d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpxor %xmm10,%xmm9,%xmm9
vmovdqu 16-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
shrdl $11,%r14d,%r14d
xorl %r10d,%r12d
xorl %ebx,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%r11d
andl %r15d,%esi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%esi
addl %r11d,%edx
shrdl $2,%r14d,%r14d
addl %esi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
xorl %edx,%r13d
shrdl $9,%r14d,%r14d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
vpxor %xmm8,%xmm9,%xmm9
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%esi
shrdl $11,%r14d,%r14d
xorl %r9d,%r12d
xorl %eax,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%r10d
andl %esi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
addl %r10d,%ecx
shrdl $2,%r14d,%r14d
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
xorl %ecx,%r13d
shrdl $9,%r14d,%r14d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 32-128(%rdi),%xmm10
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
shrdl $11,%r14d,%r14d
xorl %r8d,%r12d
xorl %r11d,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%r9d
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%esi
addl %r9d,%ebx
shrdl $2,%r14d,%r14d
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
xorl %ebx,%r13d
shrdl $9,%r14d,%r14d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 48-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%esi
shrdl $11,%r14d,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
shrdl $2,%r14d,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
xorl %eax,%r13d
shrdl $9,%r14d,%r14d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm10
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
shrdl $11,%r14d,%r14d
xorl %ecx,%r12d
xorl %r9d,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%edx
andl %r15d,%esi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%esi
addl %edx,%r11d
shrdl $2,%r14d,%r14d
addl %esi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
xorl %r11d,%r13d
shrdl $9,%r14d,%r14d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 80-128(%rdi),%xmm10
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%esi
shrdl $11,%r14d,%r14d
xorl %ebx,%r12d
xorl %r8d,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%ecx
andl %esi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
addl %ecx,%r10d
shrdl $2,%r14d,%r14d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
xorl %r10d,%r13d
shrdl $9,%r14d,%r14d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 96-128(%rdi),%xmm10
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
shrdl $11,%r14d,%r14d
xorl %eax,%r12d
xorl %edx,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%ebx
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%esi
addl %ebx,%r9d
shrdl $2,%r14d,%r14d
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
xorl %r9d,%r13d
shrdl $9,%r14d,%r14d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 112-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%esi
shrdl $11,%r14d,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
shrdl $2,%r14d,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
xorl %r8d,%r13d
shrdl $9,%r14d,%r14d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 128-128(%rdi),%xmm10
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
shrdl $11,%r14d,%r14d
xorl %r10d,%r12d
xorl %ebx,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%r11d
andl %r15d,%esi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%esi
addl %r11d,%edx
shrdl $2,%r14d,%r14d
addl %esi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
xorl %edx,%r13d
shrdl $9,%r14d,%r14d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 144-128(%rdi),%xmm10
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%esi
shrdl $11,%r14d,%r14d
xorl %r9d,%r12d
xorl %eax,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%r10d
andl %esi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
addl %r10d,%ecx
shrdl $2,%r14d,%r14d
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
xorl %ecx,%r13d
shrdl $9,%r14d,%r14d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm10
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
shrdl $11,%r14d,%r14d
xorl %r8d,%r12d
xorl %r11d,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%r9d
andl %r15d,%esi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%esi
addl %r9d,%ebx
shrdl $2,%r14d,%r14d
addl %esi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
xorl %ebx,%r13d
shrdl $9,%r14d,%r14d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 176-128(%rdi),%xmm10
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%esi
shrdl $11,%r14d,%r14d
xorl %edx,%r12d
xorl %r10d,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%r8d
andl %esi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
addl %r8d,%eax
shrdl $2,%r14d,%r14d
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
xorl %eax,%r13d
shrdl $9,%r14d,%r14d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpand %xmm12,%xmm11,%xmm8
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 192-128(%rdi),%xmm10
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
shrdl $11,%r14d,%r14d
xorl %ecx,%r12d
xorl %r9d,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%edx
andl %r15d,%esi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%esi
addl %edx,%r11d
shrdl $2,%r14d,%r14d
addl %esi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
xorl %r11d,%r13d
shrdl $9,%r14d,%r14d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 208-128(%rdi),%xmm10
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%esi
shrdl $11,%r14d,%r14d
xorl %ebx,%r12d
xorl %r8d,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%ecx
andl %esi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
addl %ecx,%r10d
shrdl $2,%r14d,%r14d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
xorl %r10d,%r13d
shrdl $9,%r14d,%r14d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
vpand %xmm13,%xmm11,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 224-128(%rdi),%xmm10
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
shrdl $11,%r14d,%r14d
xorl %eax,%r12d
xorl %edx,%r15d
shrdl $6,%r13d,%r13d
addl %r12d,%ebx
andl %r15d,%esi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%esi
addl %ebx,%r9d
shrdl $2,%r14d,%r14d
addl %esi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
xorl %r9d,%r13d
shrdl $9,%r14d,%r14d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpor %xmm11,%xmm8,%xmm8
vaesenclast %xmm10,%xmm9,%xmm11
vmovdqu 0-128(%rdi),%xmm10
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%esi
shrdl $11,%r14d,%r14d
xorl %r11d,%r12d
xorl %ecx,%esi
shrdl $6,%r13d,%r13d
addl %r12d,%eax
andl %esi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
addl %eax,%r8d
shrdl $2,%r14d,%r14d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%r12
movq 64+8(%rsp),%r13
movq 64+40(%rsp),%r15
movq 64+48(%rsp),%rsi
vpand %xmm14,%xmm11,%xmm11
movl %r14d,%eax
vpor %xmm11,%xmm8,%xmm8
vmovdqu %xmm8,(%r12,%r13,1)
leaq 16(%r12),%r12
addl 0(%r15),%eax
addl 4(%r15),%ebx
addl 8(%r15),%ecx
addl 12(%r15),%edx
addl 16(%r15),%r8d
addl 20(%r15),%r9d
addl 24(%r15),%r10d
addl 28(%r15),%r11d
cmpq 64+16(%rsp),%r12
movl %eax,0(%r15)
movl %ebx,4(%r15)
movl %ecx,8(%r15)
movl %edx,12(%r15)
movl %r8d,16(%r15)
movl %r9d,20(%r15)
movl %r10d,24(%r15)
movl %r11d,28(%r15)
jb .Lloop_avx
movq 64+32(%rsp),%r8
movq 120(%rsp),%rsi
.cfi_def_cfa %rsi,8
vmovdqu %xmm8,(%r8)
vzeroall
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha256_enc_avx,.-aesni_cbc_sha256_enc_avx
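// AVX2 variant: the scalar SHA-256 rounds use BMI1/BMI2 andn and rorx, and
// the message schedule is computed for two consecutive 64-byte blocks at a
// time in 256-bit ymm registers (vinserti128 pairs the blocks), while the
// AES-CBC rounds remain in xmm registers.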
.type aesni_cbc_sha256_enc_avx2,@function
.align 64
aesni_cbc_sha256_enc_avx2:
.cfi_startproc
.Lavx2_shortcut:
movq 8(%rsp),%r10
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
subq $576,%rsp
andq $-1024,%rsp
addq $448,%rsp
shlq $6,%rdx
subq %rdi,%rsi
subq %rdi,%r10
addq %rdi,%rdx
movq %rdx,64+16(%rsp)
movq %r8,64+32(%rsp)
movq %r9,64+40(%rsp)
movq %r10,64+48(%rsp)
movq %rax,120(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
.Lprologue_avx2:
vzeroall
movq %rdi,%r13
vpinsrq $1,%rsi,%xmm15,%xmm15
leaq 128(%rcx),%rdi
leaq K256+544(%rip),%r12
movl 240-128(%rdi),%r14d
movq %r9,%r15
movq %r10,%rsi
vmovdqu (%r8),%xmm8
leaq -9(%r14),%r14
vmovdqa 0(%r12,%r14,8),%xmm14
vmovdqa 16(%r12,%r14,8),%xmm13
vmovdqa 32(%r12,%r14,8),%xmm12
subq $-64,%r13
movl 0(%r15),%eax
leaq (%rsi,%r13,1),%r12
movl 4(%r15),%ebx
cmpq %rdx,%r13
movl 8(%r15),%ecx
cmoveq %rsp,%r12
movl 12(%r15),%edx
movl 16(%r15),%r8d
movl 20(%r15),%r9d
movl 24(%r15),%r10d
movl 28(%r15),%r11d
vmovdqu 0-128(%rdi),%xmm10
jmp .Loop_avx2
.align 16
.Loop_avx2:
vmovdqa K256+512(%rip),%ymm7
vmovdqu -64+0(%rsi,%r13,1),%xmm0
vmovdqu -64+16(%rsi,%r13,1),%xmm1
vmovdqu -64+32(%rsi,%r13,1),%xmm2
vmovdqu -64+48(%rsi,%r13,1),%xmm3
vinserti128 $1,(%r12),%ymm0,%ymm0
vinserti128 $1,16(%r12),%ymm1,%ymm1
vpshufb %ymm7,%ymm0,%ymm0
vinserti128 $1,32(%r12),%ymm2,%ymm2
vpshufb %ymm7,%ymm1,%ymm1
vinserti128 $1,48(%r12),%ymm3,%ymm3
leaq K256(%rip),%rbp
vpshufb %ymm7,%ymm2,%ymm2
leaq -64(%r13),%r13
vpaddd 0(%rbp),%ymm0,%ymm4
vpshufb %ymm7,%ymm3,%ymm3
vpaddd 32(%rbp),%ymm1,%ymm5
vpaddd 64(%rbp),%ymm2,%ymm6
vpaddd 96(%rbp),%ymm3,%ymm7
vmovdqa %ymm4,0(%rsp)
xorl %r14d,%r14d
vmovdqa %ymm5,32(%rsp)
movq 120(%rsp),%rsi
.cfi_def_cfa %rsi,8
leaq -64(%rsp),%rsp
movq %rsi,-8(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
movl %ebx,%esi
vmovdqa %ymm6,0(%rsp)
xorl %ecx,%esi
vmovdqa %ymm7,32(%rsp)
movl %r9d,%r12d
subq $-32*4,%rbp
jmp .Lavx2_00_47
.align 16
.Lavx2_00_47:
vmovdqu (%r13),%xmm9
vpinsrq $0,%r13,%xmm15,%xmm15
leaq -64(%rsp),%rsp
.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
pushq 64-8(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
leaq 8(%rsp),%rsp
.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $4,%ymm0,%ymm1,%ymm4
addl 0+128(%rsp),%r11d
andl %r8d,%r12d
rorxl $25,%r8d,%r13d
vpalignr $4,%ymm2,%ymm3,%ymm7
rorxl $11,%r8d,%r15d
leal (%rax,%r14,1),%eax
leal (%r11,%r12,1),%r11d
vpsrld $7,%ymm4,%ymm6
andnl %r10d,%r8d,%r12d
xorl %r15d,%r13d
rorxl $6,%r8d,%r14d
vpaddd %ymm7,%ymm0,%ymm0
leal (%r11,%r12,1),%r11d
xorl %r14d,%r13d
movl %eax,%r15d
vpsrld $3,%ymm4,%ymm7
rorxl $22,%eax,%r12d
leal (%r11,%r13,1),%r11d
xorl %ebx,%r15d
vpslld $14,%ymm4,%ymm5
rorxl $13,%eax,%r14d
rorxl $2,%eax,%r13d
leal (%rdx,%r11,1),%edx
vpxor %ymm6,%ymm7,%ymm4
andl %r15d,%esi
vpxor %xmm10,%xmm9,%xmm9
vmovdqu 16-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ebx,%esi
vpshufd $250,%ymm3,%ymm7
xorl %r13d,%r14d
leal (%r11,%rsi,1),%r11d
movl %r8d,%r12d
vpsrld $11,%ymm6,%ymm6
addl 4+128(%rsp),%r10d
andl %edx,%r12d
rorxl $25,%edx,%r13d
vpxor %ymm5,%ymm4,%ymm4
rorxl $11,%edx,%esi
leal (%r11,%r14,1),%r11d
leal (%r10,%r12,1),%r10d
vpslld $11,%ymm5,%ymm5
andnl %r9d,%edx,%r12d
xorl %esi,%r13d
rorxl $6,%edx,%r14d
vpxor %ymm6,%ymm4,%ymm4
leal (%r10,%r12,1),%r10d
xorl %r14d,%r13d
movl %r11d,%esi
vpsrld $10,%ymm7,%ymm6
rorxl $22,%r11d,%r12d
leal (%r10,%r13,1),%r10d
xorl %eax,%esi
vpxor %ymm5,%ymm4,%ymm4
rorxl $13,%r11d,%r14d
rorxl $2,%r11d,%r13d
leal (%rcx,%r10,1),%ecx
vpsrlq $17,%ymm7,%ymm7
andl %esi,%r15d
vpxor %xmm8,%xmm9,%xmm9
xorl %r12d,%r14d
xorl %eax,%r15d
vpaddd %ymm4,%ymm0,%ymm0
xorl %r13d,%r14d
leal (%r10,%r15,1),%r10d
movl %edx,%r12d
vpxor %ymm7,%ymm6,%ymm6
addl 8+128(%rsp),%r9d
andl %ecx,%r12d
rorxl $25,%ecx,%r13d
vpsrlq $2,%ymm7,%ymm7
rorxl $11,%ecx,%r15d
leal (%r10,%r14,1),%r10d
leal (%r9,%r12,1),%r9d
vpxor %ymm7,%ymm6,%ymm6
andnl %r8d,%ecx,%r12d
xorl %r15d,%r13d
rorxl $6,%ecx,%r14d
vpshufd $132,%ymm6,%ymm6
leal (%r9,%r12,1),%r9d
xorl %r14d,%r13d
movl %r10d,%r15d
vpsrldq $8,%ymm6,%ymm6
rorxl $22,%r10d,%r12d
leal (%r9,%r13,1),%r9d
xorl %r11d,%r15d
vpaddd %ymm6,%ymm0,%ymm0
rorxl $13,%r10d,%r14d
rorxl $2,%r10d,%r13d
leal (%rbx,%r9,1),%ebx
vpshufd $80,%ymm0,%ymm7
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 32-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r11d,%esi
vpsrld $10,%ymm7,%ymm6
xorl %r13d,%r14d
leal (%r9,%rsi,1),%r9d
movl %ecx,%r12d
vpsrlq $17,%ymm7,%ymm7
addl 12+128(%rsp),%r8d
andl %ebx,%r12d
rorxl $25,%ebx,%r13d
vpxor %ymm7,%ymm6,%ymm6
rorxl $11,%ebx,%esi
leal (%r9,%r14,1),%r9d
leal (%r8,%r12,1),%r8d
vpsrlq $2,%ymm7,%ymm7
andnl %edx,%ebx,%r12d
xorl %esi,%r13d
rorxl $6,%ebx,%r14d
vpxor %ymm7,%ymm6,%ymm6
leal (%r8,%r12,1),%r8d
xorl %r14d,%r13d
movl %r9d,%esi
vpshufd $232,%ymm6,%ymm6
rorxl $22,%r9d,%r12d
leal (%r8,%r13,1),%r8d
xorl %r10d,%esi
vpslldq $8,%ymm6,%ymm6
rorxl $13,%r9d,%r14d
rorxl $2,%r9d,%r13d
leal (%rax,%r8,1),%eax
vpaddd %ymm6,%ymm0,%ymm0
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 48-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r10d,%r15d
vpaddd 0(%rbp),%ymm0,%ymm6
xorl %r13d,%r14d
leal (%r8,%r15,1),%r8d
movl %ebx,%r12d
vmovdqa %ymm6,0(%rsp)
vpalignr $4,%ymm1,%ymm2,%ymm4
addl 32+128(%rsp),%edx
andl %eax,%r12d
rorxl $25,%eax,%r13d
vpalignr $4,%ymm3,%ymm0,%ymm7
rorxl $11,%eax,%r15d
leal (%r8,%r14,1),%r8d
leal (%rdx,%r12,1),%edx
vpsrld $7,%ymm4,%ymm6
andnl %ecx,%eax,%r12d
xorl %r15d,%r13d
rorxl $6,%eax,%r14d
vpaddd %ymm7,%ymm1,%ymm1
leal (%rdx,%r12,1),%edx
xorl %r14d,%r13d
movl %r8d,%r15d
vpsrld $3,%ymm4,%ymm7
rorxl $22,%r8d,%r12d
leal (%rdx,%r13,1),%edx
xorl %r9d,%r15d
vpslld $14,%ymm4,%ymm5
rorxl $13,%r8d,%r14d
rorxl $2,%r8d,%r13d
leal (%r11,%rdx,1),%r11d
vpxor %ymm6,%ymm7,%ymm4
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r9d,%esi
vpshufd $250,%ymm0,%ymm7
xorl %r13d,%r14d
leal (%rdx,%rsi,1),%edx
movl %eax,%r12d
vpsrld $11,%ymm6,%ymm6
addl 36+128(%rsp),%ecx
andl %r11d,%r12d
rorxl $25,%r11d,%r13d
vpxor %ymm5,%ymm4,%ymm4
rorxl $11,%r11d,%esi
leal (%rdx,%r14,1),%edx
leal (%rcx,%r12,1),%ecx
vpslld $11,%ymm5,%ymm5
andnl %ebx,%r11d,%r12d
xorl %esi,%r13d
rorxl $6,%r11d,%r14d
vpxor %ymm6,%ymm4,%ymm4
leal (%rcx,%r12,1),%ecx
xorl %r14d,%r13d
movl %edx,%esi
vpsrld $10,%ymm7,%ymm6
rorxl $22,%edx,%r12d
leal (%rcx,%r13,1),%ecx
xorl %r8d,%esi
vpxor %ymm5,%ymm4,%ymm4
rorxl $13,%edx,%r14d
rorxl $2,%edx,%r13d
leal (%r10,%rcx,1),%r10d
vpsrlq $17,%ymm7,%ymm7
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 80-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r8d,%r15d
vpaddd %ymm4,%ymm1,%ymm1
xorl %r13d,%r14d
leal (%rcx,%r15,1),%ecx
movl %r11d,%r12d
vpxor %ymm7,%ymm6,%ymm6
addl 40+128(%rsp),%ebx
andl %r10d,%r12d
rorxl $25,%r10d,%r13d
vpsrlq $2,%ymm7,%ymm7
rorxl $11,%r10d,%r15d
leal (%rcx,%r14,1),%ecx
leal (%rbx,%r12,1),%ebx
vpxor %ymm7,%ymm6,%ymm6
andnl %eax,%r10d,%r12d
xorl %r15d,%r13d
rorxl $6,%r10d,%r14d
vpshufd $132,%ymm6,%ymm6
leal (%rbx,%r12,1),%ebx
xorl %r14d,%r13d
movl %ecx,%r15d
vpsrldq $8,%ymm6,%ymm6
rorxl $22,%ecx,%r12d
leal (%rbx,%r13,1),%ebx
xorl %edx,%r15d
vpaddd %ymm6,%ymm1,%ymm1
rorxl $13,%ecx,%r14d
rorxl $2,%ecx,%r13d
leal (%r9,%rbx,1),%r9d
vpshufd $80,%ymm1,%ymm7
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 96-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %edx,%esi
vpsrld $10,%ymm7,%ymm6
xorl %r13d,%r14d
leal (%rbx,%rsi,1),%ebx
movl %r10d,%r12d
vpsrlq $17,%ymm7,%ymm7
addl 44+128(%rsp),%eax
andl %r9d,%r12d
rorxl $25,%r9d,%r13d
vpxor %ymm7,%ymm6,%ymm6
rorxl $11,%r9d,%esi
leal (%rbx,%r14,1),%ebx
leal (%rax,%r12,1),%eax
vpsrlq $2,%ymm7,%ymm7
andnl %r11d,%r9d,%r12d
xorl %esi,%r13d
rorxl $6,%r9d,%r14d
vpxor %ymm7,%ymm6,%ymm6
leal (%rax,%r12,1),%eax
xorl %r14d,%r13d
movl %ebx,%esi
vpshufd $232,%ymm6,%ymm6
rorxl $22,%ebx,%r12d
leal (%rax,%r13,1),%eax
xorl %ecx,%esi
vpslldq $8,%ymm6,%ymm6
rorxl $13,%ebx,%r14d
rorxl $2,%ebx,%r13d
leal (%r8,%rax,1),%r8d
vpaddd %ymm6,%ymm1,%ymm1
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 112-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ecx,%r15d
vpaddd 32(%rbp),%ymm1,%ymm6
xorl %r13d,%r14d
leal (%rax,%r15,1),%eax
movl %r9d,%r12d
vmovdqa %ymm6,32(%rsp)
leaq -64(%rsp),%rsp
.cfi_escape 0x0f,0x05,0x77,0x38,0x06,0x23,0x08
pushq 64-8(%rsp)
.cfi_escape 0x0f,0x05,0x77,0x00,0x06,0x23,0x08
leaq 8(%rsp),%rsp
.cfi_escape 0x0f,0x05,0x77,0x78,0x06,0x23,0x08
vpalignr $4,%ymm2,%ymm3,%ymm4
addl 0+128(%rsp),%r11d
andl %r8d,%r12d
rorxl $25,%r8d,%r13d
vpalignr $4,%ymm0,%ymm1,%ymm7
rorxl $11,%r8d,%r15d
leal (%rax,%r14,1),%eax
leal (%r11,%r12,1),%r11d
vpsrld $7,%ymm4,%ymm6
andnl %r10d,%r8d,%r12d
xorl %r15d,%r13d
rorxl $6,%r8d,%r14d
vpaddd %ymm7,%ymm2,%ymm2
leal (%r11,%r12,1),%r11d
xorl %r14d,%r13d
movl %eax,%r15d
vpsrld $3,%ymm4,%ymm7
rorxl $22,%eax,%r12d
leal (%r11,%r13,1),%r11d
xorl %ebx,%r15d
vpslld $14,%ymm4,%ymm5
rorxl $13,%eax,%r14d
rorxl $2,%eax,%r13d
leal (%rdx,%r11,1),%edx
vpxor %ymm6,%ymm7,%ymm4
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 128-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ebx,%esi
vpshufd $250,%ymm1,%ymm7
xorl %r13d,%r14d
leal (%r11,%rsi,1),%r11d
movl %r8d,%r12d
vpsrld $11,%ymm6,%ymm6
addl 4+128(%rsp),%r10d
andl %edx,%r12d
rorxl $25,%edx,%r13d
vpxor %ymm5,%ymm4,%ymm4
rorxl $11,%edx,%esi
leal (%r11,%r14,1),%r11d
leal (%r10,%r12,1),%r10d
vpslld $11,%ymm5,%ymm5
andnl %r9d,%edx,%r12d
xorl %esi,%r13d
rorxl $6,%edx,%r14d
vpxor %ymm6,%ymm4,%ymm4
leal (%r10,%r12,1),%r10d
xorl %r14d,%r13d
movl %r11d,%esi
vpsrld $10,%ymm7,%ymm6
rorxl $22,%r11d,%r12d
leal (%r10,%r13,1),%r10d
xorl %eax,%esi
vpxor %ymm5,%ymm4,%ymm4
rorxl $13,%r11d,%r14d
rorxl $2,%r11d,%r13d
leal (%rcx,%r10,1),%ecx
vpsrlq $17,%ymm7,%ymm7
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 144-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %eax,%r15d
vpaddd %ymm4,%ymm2,%ymm2
xorl %r13d,%r14d
leal (%r10,%r15,1),%r10d
movl %edx,%r12d
vpxor %ymm7,%ymm6,%ymm6
addl 8+128(%rsp),%r9d
andl %ecx,%r12d
rorxl $25,%ecx,%r13d
vpsrlq $2,%ymm7,%ymm7
rorxl $11,%ecx,%r15d
leal (%r10,%r14,1),%r10d
leal (%r9,%r12,1),%r9d
vpxor %ymm7,%ymm6,%ymm6
andnl %r8d,%ecx,%r12d
xorl %r15d,%r13d
rorxl $6,%ecx,%r14d
vpshufd $132,%ymm6,%ymm6
leal (%r9,%r12,1),%r9d
xorl %r14d,%r13d
movl %r10d,%r15d
vpsrldq $8,%ymm6,%ymm6
rorxl $22,%r10d,%r12d
leal (%r9,%r13,1),%r9d
xorl %r11d,%r15d
vpaddd %ymm6,%ymm2,%ymm2
rorxl $13,%r10d,%r14d
rorxl $2,%r10d,%r13d
leal (%rbx,%r9,1),%ebx
vpshufd $80,%ymm2,%ymm7
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r11d,%esi
vpsrld $10,%ymm7,%ymm6
xorl %r13d,%r14d
leal (%r9,%rsi,1),%r9d
movl %ecx,%r12d
vpsrlq $17,%ymm7,%ymm7
addl 12+128(%rsp),%r8d
andl %ebx,%r12d
rorxl $25,%ebx,%r13d
vpxor %ymm7,%ymm6,%ymm6
rorxl $11,%ebx,%esi
leal (%r9,%r14,1),%r9d
leal (%r8,%r12,1),%r8d
vpsrlq $2,%ymm7,%ymm7
andnl %edx,%ebx,%r12d
xorl %esi,%r13d
rorxl $6,%ebx,%r14d
vpxor %ymm7,%ymm6,%ymm6
leal (%r8,%r12,1),%r8d
xorl %r14d,%r13d
movl %r9d,%esi
vpshufd $232,%ymm6,%ymm6
rorxl $22,%r9d,%r12d
leal (%r8,%r13,1),%r8d
xorl %r10d,%esi
vpslldq $8,%ymm6,%ymm6
rorxl $13,%r9d,%r14d
rorxl $2,%r9d,%r13d
leal (%rax,%r8,1),%eax
vpaddd %ymm6,%ymm2,%ymm2
andl %esi,%r15d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 176-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r10d,%r15d
vpaddd 64(%rbp),%ymm2,%ymm6
xorl %r13d,%r14d
leal (%r8,%r15,1),%r8d
movl %ebx,%r12d
vmovdqa %ymm6,0(%rsp)
vpalignr $4,%ymm3,%ymm0,%ymm4
addl 32+128(%rsp),%edx
andl %eax,%r12d
rorxl $25,%eax,%r13d
vpalignr $4,%ymm1,%ymm2,%ymm7
rorxl $11,%eax,%r15d
leal (%r8,%r14,1),%r8d
leal (%rdx,%r12,1),%edx
vpsrld $7,%ymm4,%ymm6
andnl %ecx,%eax,%r12d
xorl %r15d,%r13d
rorxl $6,%eax,%r14d
vpaddd %ymm7,%ymm3,%ymm3
leal (%rdx,%r12,1),%edx
xorl %r14d,%r13d
movl %r8d,%r15d
vpsrld $3,%ymm4,%ymm7
rorxl $22,%r8d,%r12d
leal (%rdx,%r13,1),%edx
xorl %r9d,%r15d
vpslld $14,%ymm4,%ymm5
rorxl $13,%r8d,%r14d
rorxl $2,%r8d,%r13d
leal (%r11,%rdx,1),%r11d
vpxor %ymm6,%ymm7,%ymm4
andl %r15d,%esi
vpand %xmm12,%xmm11,%xmm8
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 192-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r9d,%esi
vpshufd $250,%ymm2,%ymm7
xorl %r13d,%r14d
leal (%rdx,%rsi,1),%edx
movl %eax,%r12d
vpsrld $11,%ymm6,%ymm6
addl 36+128(%rsp),%ecx
andl %r11d,%r12d
rorxl $25,%r11d,%r13d
vpxor %ymm5,%ymm4,%ymm4
rorxl $11,%r11d,%esi
leal (%rdx,%r14,1),%edx
leal (%rcx,%r12,1),%ecx
vpslld $11,%ymm5,%ymm5
andnl %ebx,%r11d,%r12d
xorl %esi,%r13d
rorxl $6,%r11d,%r14d
vpxor %ymm6,%ymm4,%ymm4
leal (%rcx,%r12,1),%ecx
xorl %r14d,%r13d
movl %edx,%esi
vpsrld $10,%ymm7,%ymm6
rorxl $22,%edx,%r12d
leal (%rcx,%r13,1),%ecx
xorl %r8d,%esi
vpxor %ymm5,%ymm4,%ymm4
rorxl $13,%edx,%r14d
rorxl $2,%edx,%r13d
leal (%r10,%rcx,1),%r10d
vpsrlq $17,%ymm7,%ymm7
andl %esi,%r15d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 208-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r8d,%r15d
vpaddd %ymm4,%ymm3,%ymm3
xorl %r13d,%r14d
leal (%rcx,%r15,1),%ecx
movl %r11d,%r12d
vpxor %ymm7,%ymm6,%ymm6
addl 40+128(%rsp),%ebx
andl %r10d,%r12d
rorxl $25,%r10d,%r13d
vpsrlq $2,%ymm7,%ymm7
rorxl $11,%r10d,%r15d
leal (%rcx,%r14,1),%ecx
leal (%rbx,%r12,1),%ebx
vpxor %ymm7,%ymm6,%ymm6
andnl %eax,%r10d,%r12d
xorl %r15d,%r13d
rorxl $6,%r10d,%r14d
vpshufd $132,%ymm6,%ymm6
leal (%rbx,%r12,1),%ebx
xorl %r14d,%r13d
movl %ecx,%r15d
vpsrldq $8,%ymm6,%ymm6
rorxl $22,%ecx,%r12d
leal (%rbx,%r13,1),%ebx
xorl %edx,%r15d
vpaddd %ymm6,%ymm3,%ymm3
rorxl $13,%ecx,%r14d
rorxl $2,%ecx,%r13d
leal (%r9,%rbx,1),%r9d
vpshufd $80,%ymm3,%ymm7
andl %r15d,%esi
vpand %xmm13,%xmm11,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 224-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %edx,%esi
vpsrld $10,%ymm7,%ymm6
xorl %r13d,%r14d
leal (%rbx,%rsi,1),%ebx
movl %r10d,%r12d
vpsrlq $17,%ymm7,%ymm7
addl 44+128(%rsp),%eax
andl %r9d,%r12d
rorxl $25,%r9d,%r13d
vpxor %ymm7,%ymm6,%ymm6
rorxl $11,%r9d,%esi
leal (%rbx,%r14,1),%ebx
leal (%rax,%r12,1),%eax
vpsrlq $2,%ymm7,%ymm7
andnl %r11d,%r9d,%r12d
xorl %esi,%r13d
rorxl $6,%r9d,%r14d
vpxor %ymm7,%ymm6,%ymm6
leal (%rax,%r12,1),%eax
xorl %r14d,%r13d
movl %ebx,%esi
vpshufd $232,%ymm6,%ymm6
rorxl $22,%ebx,%r12d
leal (%rax,%r13,1),%eax
xorl %ecx,%esi
vpslldq $8,%ymm6,%ymm6
rorxl $13,%ebx,%r14d
rorxl $2,%ebx,%r13d
leal (%r8,%rax,1),%r8d
vpaddd %ymm6,%ymm3,%ymm3
andl %esi,%r15d
vpor %xmm11,%xmm8,%xmm8
vaesenclast %xmm10,%xmm9,%xmm11
vmovdqu 0-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ecx,%r15d
vpaddd 96(%rbp),%ymm3,%ymm6
xorl %r13d,%r14d
leal (%rax,%r15,1),%eax
movl %r9d,%r12d
vmovdqa %ymm6,32(%rsp)
vmovq %xmm15,%r13
vpextrq $1,%xmm15,%r15
vpand %xmm14,%xmm11,%xmm11
vpor %xmm11,%xmm8,%xmm8
vmovdqu %xmm8,(%r15,%r13,1)
leaq 16(%r13),%r13
leaq 128(%rbp),%rbp
cmpb $0,3(%rbp)
jne .Lavx2_00_47
vmovdqu (%r13),%xmm9
vpinsrq $0,%r13,%xmm15,%xmm15
addl 0+64(%rsp),%r11d
andl %r8d,%r12d
rorxl $25,%r8d,%r13d
rorxl $11,%r8d,%r15d
leal (%rax,%r14,1),%eax
leal (%r11,%r12,1),%r11d
andnl %r10d,%r8d,%r12d
xorl %r15d,%r13d
rorxl $6,%r8d,%r14d
leal (%r11,%r12,1),%r11d
xorl %r14d,%r13d
movl %eax,%r15d
rorxl $22,%eax,%r12d
leal (%r11,%r13,1),%r11d
xorl %ebx,%r15d
rorxl $13,%eax,%r14d
rorxl $2,%eax,%r13d
leal (%rdx,%r11,1),%edx
andl %r15d,%esi
vpxor %xmm10,%xmm9,%xmm9
vmovdqu 16-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ebx,%esi
xorl %r13d,%r14d
leal (%r11,%rsi,1),%r11d
movl %r8d,%r12d
addl 4+64(%rsp),%r10d
andl %edx,%r12d
rorxl $25,%edx,%r13d
rorxl $11,%edx,%esi
leal (%r11,%r14,1),%r11d
leal (%r10,%r12,1),%r10d
andnl %r9d,%edx,%r12d
xorl %esi,%r13d
rorxl $6,%edx,%r14d
leal (%r10,%r12,1),%r10d
xorl %r14d,%r13d
movl %r11d,%esi
rorxl $22,%r11d,%r12d
leal (%r10,%r13,1),%r10d
xorl %eax,%esi
rorxl $13,%r11d,%r14d
rorxl $2,%r11d,%r13d
leal (%rcx,%r10,1),%ecx
andl %esi,%r15d
vpxor %xmm8,%xmm9,%xmm9
xorl %r12d,%r14d
xorl %eax,%r15d
xorl %r13d,%r14d
leal (%r10,%r15,1),%r10d
movl %edx,%r12d
addl 8+64(%rsp),%r9d
andl %ecx,%r12d
rorxl $25,%ecx,%r13d
rorxl $11,%ecx,%r15d
leal (%r10,%r14,1),%r10d
leal (%r9,%r12,1),%r9d
andnl %r8d,%ecx,%r12d
xorl %r15d,%r13d
rorxl $6,%ecx,%r14d
leal (%r9,%r12,1),%r9d
xorl %r14d,%r13d
movl %r10d,%r15d
rorxl $22,%r10d,%r12d
leal (%r9,%r13,1),%r9d
xorl %r11d,%r15d
rorxl $13,%r10d,%r14d
rorxl $2,%r10d,%r13d
leal (%rbx,%r9,1),%ebx
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 32-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r11d,%esi
xorl %r13d,%r14d
leal (%r9,%rsi,1),%r9d
movl %ecx,%r12d
addl 12+64(%rsp),%r8d
andl %ebx,%r12d
rorxl $25,%ebx,%r13d
rorxl $11,%ebx,%esi
leal (%r9,%r14,1),%r9d
leal (%r8,%r12,1),%r8d
andnl %edx,%ebx,%r12d
xorl %esi,%r13d
rorxl $6,%ebx,%r14d
leal (%r8,%r12,1),%r8d
xorl %r14d,%r13d
movl %r9d,%esi
rorxl $22,%r9d,%r12d
leal (%r8,%r13,1),%r8d
xorl %r10d,%esi
rorxl $13,%r9d,%r14d
rorxl $2,%r9d,%r13d
leal (%rax,%r8,1),%eax
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 48-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r10d,%r15d
xorl %r13d,%r14d
leal (%r8,%r15,1),%r8d
movl %ebx,%r12d
addl 32+64(%rsp),%edx
andl %eax,%r12d
rorxl $25,%eax,%r13d
rorxl $11,%eax,%r15d
leal (%r8,%r14,1),%r8d
leal (%rdx,%r12,1),%edx
andnl %ecx,%eax,%r12d
xorl %r15d,%r13d
rorxl $6,%eax,%r14d
leal (%rdx,%r12,1),%edx
xorl %r14d,%r13d
movl %r8d,%r15d
rorxl $22,%r8d,%r12d
leal (%rdx,%r13,1),%edx
xorl %r9d,%r15d
rorxl $13,%r8d,%r14d
rorxl $2,%r8d,%r13d
leal (%r11,%rdx,1),%r11d
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r9d,%esi
xorl %r13d,%r14d
leal (%rdx,%rsi,1),%edx
movl %eax,%r12d
addl 36+64(%rsp),%ecx
andl %r11d,%r12d
rorxl $25,%r11d,%r13d
rorxl $11,%r11d,%esi
leal (%rdx,%r14,1),%edx
leal (%rcx,%r12,1),%ecx
andnl %ebx,%r11d,%r12d
xorl %esi,%r13d
rorxl $6,%r11d,%r14d
leal (%rcx,%r12,1),%ecx
xorl %r14d,%r13d
movl %edx,%esi
rorxl $22,%edx,%r12d
leal (%rcx,%r13,1),%ecx
xorl %r8d,%esi
rorxl $13,%edx,%r14d
rorxl $2,%edx,%r13d
leal (%r10,%rcx,1),%r10d
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 80-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r8d,%r15d
xorl %r13d,%r14d
leal (%rcx,%r15,1),%ecx
movl %r11d,%r12d
addl 40+64(%rsp),%ebx
andl %r10d,%r12d
rorxl $25,%r10d,%r13d
rorxl $11,%r10d,%r15d
leal (%rcx,%r14,1),%ecx
leal (%rbx,%r12,1),%ebx
andnl %eax,%r10d,%r12d
xorl %r15d,%r13d
rorxl $6,%r10d,%r14d
leal (%rbx,%r12,1),%ebx
xorl %r14d,%r13d
movl %ecx,%r15d
rorxl $22,%ecx,%r12d
leal (%rbx,%r13,1),%ebx
xorl %edx,%r15d
rorxl $13,%ecx,%r14d
rorxl $2,%ecx,%r13d
leal (%r9,%rbx,1),%r9d
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 96-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %edx,%esi
xorl %r13d,%r14d
leal (%rbx,%rsi,1),%ebx
movl %r10d,%r12d
addl 44+64(%rsp),%eax
andl %r9d,%r12d
rorxl $25,%r9d,%r13d
rorxl $11,%r9d,%esi
leal (%rbx,%r14,1),%ebx
leal (%rax,%r12,1),%eax
andnl %r11d,%r9d,%r12d
xorl %esi,%r13d
rorxl $6,%r9d,%r14d
leal (%rax,%r12,1),%eax
xorl %r14d,%r13d
movl %ebx,%esi
rorxl $22,%ebx,%r12d
leal (%rax,%r13,1),%eax
xorl %ecx,%esi
rorxl $13,%ebx,%r14d
rorxl $2,%ebx,%r13d
leal (%r8,%rax,1),%r8d
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 112-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ecx,%r15d
xorl %r13d,%r14d
leal (%rax,%r15,1),%eax
movl %r9d,%r12d
addl 0(%rsp),%r11d
andl %r8d,%r12d
rorxl $25,%r8d,%r13d
rorxl $11,%r8d,%r15d
leal (%rax,%r14,1),%eax
leal (%r11,%r12,1),%r11d
andnl %r10d,%r8d,%r12d
xorl %r15d,%r13d
rorxl $6,%r8d,%r14d
leal (%r11,%r12,1),%r11d
xorl %r14d,%r13d
movl %eax,%r15d
rorxl $22,%eax,%r12d
leal (%r11,%r13,1),%r11d
xorl %ebx,%r15d
rorxl $13,%eax,%r14d
rorxl $2,%eax,%r13d
leal (%rdx,%r11,1),%edx
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 128-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ebx,%esi
xorl %r13d,%r14d
leal (%r11,%rsi,1),%r11d
movl %r8d,%r12d
addl 4(%rsp),%r10d
andl %edx,%r12d
rorxl $25,%edx,%r13d
rorxl $11,%edx,%esi
leal (%r11,%r14,1),%r11d
leal (%r10,%r12,1),%r10d
andnl %r9d,%edx,%r12d
xorl %esi,%r13d
rorxl $6,%edx,%r14d
leal (%r10,%r12,1),%r10d
xorl %r14d,%r13d
movl %r11d,%esi
rorxl $22,%r11d,%r12d
leal (%r10,%r13,1),%r10d
xorl %eax,%esi
rorxl $13,%r11d,%r14d
rorxl $2,%r11d,%r13d
leal (%rcx,%r10,1),%ecx
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 144-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %eax,%r15d
xorl %r13d,%r14d
leal (%r10,%r15,1),%r10d
movl %edx,%r12d
addl 8(%rsp),%r9d
andl %ecx,%r12d
rorxl $25,%ecx,%r13d
rorxl $11,%ecx,%r15d
leal (%r10,%r14,1),%r10d
leal (%r9,%r12,1),%r9d
andnl %r8d,%ecx,%r12d
xorl %r15d,%r13d
rorxl $6,%ecx,%r14d
leal (%r9,%r12,1),%r9d
xorl %r14d,%r13d
movl %r10d,%r15d
rorxl $22,%r10d,%r12d
leal (%r9,%r13,1),%r9d
xorl %r11d,%r15d
rorxl $13,%r10d,%r14d
rorxl $2,%r10d,%r13d
leal (%rbx,%r9,1),%ebx
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r11d,%esi
xorl %r13d,%r14d
leal (%r9,%rsi,1),%r9d
movl %ecx,%r12d
addl 12(%rsp),%r8d
andl %ebx,%r12d
rorxl $25,%ebx,%r13d
rorxl $11,%ebx,%esi
leal (%r9,%r14,1),%r9d
leal (%r8,%r12,1),%r8d
andnl %edx,%ebx,%r12d
xorl %esi,%r13d
rorxl $6,%ebx,%r14d
leal (%r8,%r12,1),%r8d
xorl %r14d,%r13d
movl %r9d,%esi
rorxl $22,%r9d,%r12d
leal (%r8,%r13,1),%r8d
xorl %r10d,%esi
rorxl $13,%r9d,%r14d
rorxl $2,%r9d,%r13d
leal (%rax,%r8,1),%eax
andl %esi,%r15d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 176-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r10d,%r15d
xorl %r13d,%r14d
leal (%r8,%r15,1),%r8d
movl %ebx,%r12d
addl 32(%rsp),%edx
andl %eax,%r12d
rorxl $25,%eax,%r13d
rorxl $11,%eax,%r15d
leal (%r8,%r14,1),%r8d
leal (%rdx,%r12,1),%edx
andnl %ecx,%eax,%r12d
xorl %r15d,%r13d
rorxl $6,%eax,%r14d
leal (%rdx,%r12,1),%edx
xorl %r14d,%r13d
movl %r8d,%r15d
rorxl $22,%r8d,%r12d
leal (%rdx,%r13,1),%edx
xorl %r9d,%r15d
rorxl $13,%r8d,%r14d
rorxl $2,%r8d,%r13d
leal (%r11,%rdx,1),%r11d
andl %r15d,%esi
vpand %xmm12,%xmm11,%xmm8
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 192-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r9d,%esi
xorl %r13d,%r14d
leal (%rdx,%rsi,1),%edx
movl %eax,%r12d
addl 36(%rsp),%ecx
andl %r11d,%r12d
rorxl $25,%r11d,%r13d
rorxl $11,%r11d,%esi
leal (%rdx,%r14,1),%edx
leal (%rcx,%r12,1),%ecx
andnl %ebx,%r11d,%r12d
xorl %esi,%r13d
rorxl $6,%r11d,%r14d
leal (%rcx,%r12,1),%ecx
xorl %r14d,%r13d
movl %edx,%esi
rorxl $22,%edx,%r12d
leal (%rcx,%r13,1),%ecx
xorl %r8d,%esi
rorxl $13,%edx,%r14d
rorxl $2,%edx,%r13d
leal (%r10,%rcx,1),%r10d
andl %esi,%r15d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 208-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r8d,%r15d
xorl %r13d,%r14d
leal (%rcx,%r15,1),%ecx
movl %r11d,%r12d
addl 40(%rsp),%ebx
andl %r10d,%r12d
rorxl $25,%r10d,%r13d
rorxl $11,%r10d,%r15d
leal (%rcx,%r14,1),%ecx
leal (%rbx,%r12,1),%ebx
andnl %eax,%r10d,%r12d
xorl %r15d,%r13d
rorxl $6,%r10d,%r14d
leal (%rbx,%r12,1),%ebx
xorl %r14d,%r13d
movl %ecx,%r15d
rorxl $22,%ecx,%r12d
leal (%rbx,%r13,1),%ebx
xorl %edx,%r15d
rorxl $13,%ecx,%r14d
rorxl $2,%ecx,%r13d
leal (%r9,%rbx,1),%r9d
andl %r15d,%esi
vpand %xmm13,%xmm11,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 224-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %edx,%esi
xorl %r13d,%r14d
leal (%rbx,%rsi,1),%ebx
movl %r10d,%r12d
addl 44(%rsp),%eax
andl %r9d,%r12d
rorxl $25,%r9d,%r13d
rorxl $11,%r9d,%esi
leal (%rbx,%r14,1),%ebx
leal (%rax,%r12,1),%eax
andnl %r11d,%r9d,%r12d
xorl %esi,%r13d
rorxl $6,%r9d,%r14d
leal (%rax,%r12,1),%eax
xorl %r14d,%r13d
movl %ebx,%esi
rorxl $22,%ebx,%r12d
leal (%rax,%r13,1),%eax
xorl %ecx,%esi
rorxl $13,%ebx,%r14d
rorxl $2,%ebx,%r13d
leal (%r8,%rax,1),%r8d
andl %esi,%r15d
vpor %xmm11,%xmm8,%xmm8
vaesenclast %xmm10,%xmm9,%xmm11
vmovdqu 0-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ecx,%r15d
xorl %r13d,%r14d
leal (%rax,%r15,1),%eax
movl %r9d,%r12d
vpextrq $1,%xmm15,%r12
vmovq %xmm15,%r13
movq 552(%rsp),%r15
addl %r14d,%eax
leaq 448(%rsp),%rbp
vpand %xmm14,%xmm11,%xmm11
vpor %xmm11,%xmm8,%xmm8
vmovdqu %xmm8,(%r12,%r13,1)
leaq 16(%r13),%r13
addl 0(%r15),%eax
addl 4(%r15),%ebx
addl 8(%r15),%ecx
addl 12(%r15),%edx
addl 16(%r15),%r8d
addl 20(%r15),%r9d
addl 24(%r15),%r10d
addl 28(%r15),%r11d
movl %eax,0(%r15)
movl %ebx,4(%r15)
movl %ecx,8(%r15)
movl %edx,12(%r15)
movl %r8d,16(%r15)
movl %r9d,20(%r15)
movl %r10d,24(%r15)
movl %r11d,28(%r15)
cmpq 80(%rbp),%r13
je .Ldone_avx2
xorl %r14d,%r14d
movl %ebx,%esi
movl %r9d,%r12d
xorl %ecx,%esi
jmp .Lower_avx2
.align 16
.Lower_avx2:
vmovdqu (%r13),%xmm9
vpinsrq $0,%r13,%xmm15,%xmm15
addl 0+16(%rbp),%r11d
andl %r8d,%r12d
rorxl $25,%r8d,%r13d
rorxl $11,%r8d,%r15d
leal (%rax,%r14,1),%eax
leal (%r11,%r12,1),%r11d
andnl %r10d,%r8d,%r12d
xorl %r15d,%r13d
rorxl $6,%r8d,%r14d
leal (%r11,%r12,1),%r11d
xorl %r14d,%r13d
movl %eax,%r15d
rorxl $22,%eax,%r12d
leal (%r11,%r13,1),%r11d
xorl %ebx,%r15d
rorxl $13,%eax,%r14d
rorxl $2,%eax,%r13d
leal (%rdx,%r11,1),%edx
andl %r15d,%esi
vpxor %xmm10,%xmm9,%xmm9
vmovdqu 16-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ebx,%esi
xorl %r13d,%r14d
leal (%r11,%rsi,1),%r11d
movl %r8d,%r12d
addl 4+16(%rbp),%r10d
andl %edx,%r12d
rorxl $25,%edx,%r13d
rorxl $11,%edx,%esi
leal (%r11,%r14,1),%r11d
leal (%r10,%r12,1),%r10d
andnl %r9d,%edx,%r12d
xorl %esi,%r13d
rorxl $6,%edx,%r14d
leal (%r10,%r12,1),%r10d
xorl %r14d,%r13d
movl %r11d,%esi
rorxl $22,%r11d,%r12d
leal (%r10,%r13,1),%r10d
xorl %eax,%esi
rorxl $13,%r11d,%r14d
rorxl $2,%r11d,%r13d
leal (%rcx,%r10,1),%ecx
andl %esi,%r15d
vpxor %xmm8,%xmm9,%xmm9
xorl %r12d,%r14d
xorl %eax,%r15d
xorl %r13d,%r14d
leal (%r10,%r15,1),%r10d
movl %edx,%r12d
addl 8+16(%rbp),%r9d
andl %ecx,%r12d
rorxl $25,%ecx,%r13d
rorxl $11,%ecx,%r15d
leal (%r10,%r14,1),%r10d
leal (%r9,%r12,1),%r9d
andnl %r8d,%ecx,%r12d
xorl %r15d,%r13d
rorxl $6,%ecx,%r14d
leal (%r9,%r12,1),%r9d
xorl %r14d,%r13d
movl %r10d,%r15d
rorxl $22,%r10d,%r12d
leal (%r9,%r13,1),%r9d
xorl %r11d,%r15d
rorxl $13,%r10d,%r14d
rorxl $2,%r10d,%r13d
leal (%rbx,%r9,1),%ebx
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 32-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r11d,%esi
xorl %r13d,%r14d
leal (%r9,%rsi,1),%r9d
movl %ecx,%r12d
addl 12+16(%rbp),%r8d
andl %ebx,%r12d
rorxl $25,%ebx,%r13d
rorxl $11,%ebx,%esi
leal (%r9,%r14,1),%r9d
leal (%r8,%r12,1),%r8d
andnl %edx,%ebx,%r12d
xorl %esi,%r13d
rorxl $6,%ebx,%r14d
leal (%r8,%r12,1),%r8d
xorl %r14d,%r13d
movl %r9d,%esi
rorxl $22,%r9d,%r12d
leal (%r8,%r13,1),%r8d
xorl %r10d,%esi
rorxl $13,%r9d,%r14d
rorxl $2,%r9d,%r13d
leal (%rax,%r8,1),%eax
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 48-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r10d,%r15d
xorl %r13d,%r14d
leal (%r8,%r15,1),%r8d
movl %ebx,%r12d
addl 32+16(%rbp),%edx
andl %eax,%r12d
rorxl $25,%eax,%r13d
rorxl $11,%eax,%r15d
leal (%r8,%r14,1),%r8d
leal (%rdx,%r12,1),%edx
andnl %ecx,%eax,%r12d
xorl %r15d,%r13d
rorxl $6,%eax,%r14d
leal (%rdx,%r12,1),%edx
xorl %r14d,%r13d
movl %r8d,%r15d
rorxl $22,%r8d,%r12d
leal (%rdx,%r13,1),%edx
xorl %r9d,%r15d
rorxl $13,%r8d,%r14d
rorxl $2,%r8d,%r13d
leal (%r11,%rdx,1),%r11d
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r9d,%esi
xorl %r13d,%r14d
leal (%rdx,%rsi,1),%edx
movl %eax,%r12d
addl 36+16(%rbp),%ecx
andl %r11d,%r12d
rorxl $25,%r11d,%r13d
rorxl $11,%r11d,%esi
leal (%rdx,%r14,1),%edx
leal (%rcx,%r12,1),%ecx
andnl %ebx,%r11d,%r12d
xorl %esi,%r13d
rorxl $6,%r11d,%r14d
leal (%rcx,%r12,1),%ecx
xorl %r14d,%r13d
movl %edx,%esi
rorxl $22,%edx,%r12d
leal (%rcx,%r13,1),%ecx
xorl %r8d,%esi
rorxl $13,%edx,%r14d
rorxl $2,%edx,%r13d
leal (%r10,%rcx,1),%r10d
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 80-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r8d,%r15d
xorl %r13d,%r14d
leal (%rcx,%r15,1),%ecx
movl %r11d,%r12d
addl 40+16(%rbp),%ebx
andl %r10d,%r12d
rorxl $25,%r10d,%r13d
rorxl $11,%r10d,%r15d
leal (%rcx,%r14,1),%ecx
leal (%rbx,%r12,1),%ebx
andnl %eax,%r10d,%r12d
xorl %r15d,%r13d
rorxl $6,%r10d,%r14d
leal (%rbx,%r12,1),%ebx
xorl %r14d,%r13d
movl %ecx,%r15d
rorxl $22,%ecx,%r12d
leal (%rbx,%r13,1),%ebx
xorl %edx,%r15d
rorxl $13,%ecx,%r14d
rorxl $2,%ecx,%r13d
leal (%r9,%rbx,1),%r9d
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 96-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %edx,%esi
xorl %r13d,%r14d
leal (%rbx,%rsi,1),%ebx
movl %r10d,%r12d
addl 44+16(%rbp),%eax
andl %r9d,%r12d
rorxl $25,%r9d,%r13d
rorxl $11,%r9d,%esi
leal (%rbx,%r14,1),%ebx
leal (%rax,%r12,1),%eax
andnl %r11d,%r9d,%r12d
xorl %esi,%r13d
rorxl $6,%r9d,%r14d
leal (%rax,%r12,1),%eax
xorl %r14d,%r13d
movl %ebx,%esi
rorxl $22,%ebx,%r12d
leal (%rax,%r13,1),%eax
xorl %ecx,%esi
rorxl $13,%ebx,%r14d
rorxl $2,%ebx,%r13d
leal (%r8,%rax,1),%r8d
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 112-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ecx,%r15d
xorl %r13d,%r14d
leal (%rax,%r15,1),%eax
movl %r9d,%r12d
leaq -64(%rbp),%rbp
addl 0+16(%rbp),%r11d
andl %r8d,%r12d
rorxl $25,%r8d,%r13d
rorxl $11,%r8d,%r15d
leal (%rax,%r14,1),%eax
leal (%r11,%r12,1),%r11d
andnl %r10d,%r8d,%r12d
xorl %r15d,%r13d
rorxl $6,%r8d,%r14d
leal (%r11,%r12,1),%r11d
xorl %r14d,%r13d
movl %eax,%r15d
rorxl $22,%eax,%r12d
leal (%r11,%r13,1),%r11d
xorl %ebx,%r15d
rorxl $13,%eax,%r14d
rorxl $2,%eax,%r13d
leal (%rdx,%r11,1),%edx
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 128-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ebx,%esi
xorl %r13d,%r14d
leal (%r11,%rsi,1),%r11d
movl %r8d,%r12d
addl 4+16(%rbp),%r10d
andl %edx,%r12d
rorxl $25,%edx,%r13d
rorxl $11,%edx,%esi
leal (%r11,%r14,1),%r11d
leal (%r10,%r12,1),%r10d
andnl %r9d,%edx,%r12d
xorl %esi,%r13d
rorxl $6,%edx,%r14d
leal (%r10,%r12,1),%r10d
xorl %r14d,%r13d
movl %r11d,%esi
rorxl $22,%r11d,%r12d
leal (%r10,%r13,1),%r10d
xorl %eax,%esi
rorxl $13,%r11d,%r14d
rorxl $2,%r11d,%r13d
leal (%rcx,%r10,1),%ecx
andl %esi,%r15d
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 144-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %eax,%r15d
xorl %r13d,%r14d
leal (%r10,%r15,1),%r10d
movl %edx,%r12d
addl 8+16(%rbp),%r9d
andl %ecx,%r12d
rorxl $25,%ecx,%r13d
rorxl $11,%ecx,%r15d
leal (%r10,%r14,1),%r10d
leal (%r9,%r12,1),%r9d
andnl %r8d,%ecx,%r12d
xorl %r15d,%r13d
rorxl $6,%ecx,%r14d
leal (%r9,%r12,1),%r9d
xorl %r14d,%r13d
movl %r10d,%r15d
rorxl $22,%r10d,%r12d
leal (%r9,%r13,1),%r9d
xorl %r11d,%r15d
rorxl $13,%r10d,%r14d
rorxl $2,%r10d,%r13d
leal (%rbx,%r9,1),%ebx
andl %r15d,%esi
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r11d,%esi
xorl %r13d,%r14d
leal (%r9,%rsi,1),%r9d
movl %ecx,%r12d
addl 12+16(%rbp),%r8d
andl %ebx,%r12d
rorxl $25,%ebx,%r13d
rorxl $11,%ebx,%esi
leal (%r9,%r14,1),%r9d
leal (%r8,%r12,1),%r8d
andnl %edx,%ebx,%r12d
xorl %esi,%r13d
rorxl $6,%ebx,%r14d
leal (%r8,%r12,1),%r8d
xorl %r14d,%r13d
movl %r9d,%esi
rorxl $22,%r9d,%r12d
leal (%r8,%r13,1),%r8d
xorl %r10d,%esi
rorxl $13,%r9d,%r14d
rorxl $2,%r9d,%r13d
leal (%rax,%r8,1),%eax
andl %esi,%r15d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 176-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r10d,%r15d
xorl %r13d,%r14d
leal (%r8,%r15,1),%r8d
movl %ebx,%r12d
addl 32+16(%rbp),%edx
andl %eax,%r12d
rorxl $25,%eax,%r13d
rorxl $11,%eax,%r15d
leal (%r8,%r14,1),%r8d
leal (%rdx,%r12,1),%edx
andnl %ecx,%eax,%r12d
xorl %r15d,%r13d
rorxl $6,%eax,%r14d
leal (%rdx,%r12,1),%edx
xorl %r14d,%r13d
movl %r8d,%r15d
rorxl $22,%r8d,%r12d
leal (%rdx,%r13,1),%edx
xorl %r9d,%r15d
rorxl $13,%r8d,%r14d
rorxl $2,%r8d,%r13d
leal (%r11,%rdx,1),%r11d
andl %r15d,%esi
vpand %xmm12,%xmm11,%xmm8
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 192-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r9d,%esi
xorl %r13d,%r14d
leal (%rdx,%rsi,1),%edx
movl %eax,%r12d
addl 36+16(%rbp),%ecx
andl %r11d,%r12d
rorxl $25,%r11d,%r13d
rorxl $11,%r11d,%esi
leal (%rdx,%r14,1),%edx
leal (%rcx,%r12,1),%ecx
andnl %ebx,%r11d,%r12d
xorl %esi,%r13d
rorxl $6,%r11d,%r14d
leal (%rcx,%r12,1),%ecx
xorl %r14d,%r13d
movl %edx,%esi
rorxl $22,%edx,%r12d
leal (%rcx,%r13,1),%ecx
xorl %r8d,%esi
rorxl $13,%edx,%r14d
rorxl $2,%edx,%r13d
leal (%r10,%rcx,1),%r10d
andl %esi,%r15d
vaesenclast %xmm10,%xmm9,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 208-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %r8d,%r15d
xorl %r13d,%r14d
leal (%rcx,%r15,1),%ecx
movl %r11d,%r12d
addl 40+16(%rbp),%ebx
andl %r10d,%r12d
rorxl $25,%r10d,%r13d
rorxl $11,%r10d,%r15d
leal (%rcx,%r14,1),%ecx
leal (%rbx,%r12,1),%ebx
andnl %eax,%r10d,%r12d
xorl %r15d,%r13d
rorxl $6,%r10d,%r14d
leal (%rbx,%r12,1),%ebx
xorl %r14d,%r13d
movl %ecx,%r15d
rorxl $22,%ecx,%r12d
leal (%rbx,%r13,1),%ebx
xorl %edx,%r15d
rorxl $13,%ecx,%r14d
rorxl $2,%ecx,%r13d
leal (%r9,%rbx,1),%r9d
andl %r15d,%esi
vpand %xmm13,%xmm11,%xmm11
vaesenc %xmm10,%xmm9,%xmm9
vmovdqu 224-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %edx,%esi
xorl %r13d,%r14d
leal (%rbx,%rsi,1),%ebx
movl %r10d,%r12d
addl 44+16(%rbp),%eax
andl %r9d,%r12d
rorxl $25,%r9d,%r13d
rorxl $11,%r9d,%esi
leal (%rbx,%r14,1),%ebx
leal (%rax,%r12,1),%eax
andnl %r11d,%r9d,%r12d
xorl %esi,%r13d
rorxl $6,%r9d,%r14d
leal (%rax,%r12,1),%eax
xorl %r14d,%r13d
movl %ebx,%esi
rorxl $22,%ebx,%r12d
leal (%rax,%r13,1),%eax
xorl %ecx,%esi
rorxl $13,%ebx,%r14d
rorxl $2,%ebx,%r13d
leal (%r8,%rax,1),%r8d
andl %esi,%r15d
vpor %xmm11,%xmm8,%xmm8
vaesenclast %xmm10,%xmm9,%xmm11
vmovdqu 0-128(%rdi),%xmm10
xorl %r12d,%r14d
xorl %ecx,%r15d
xorl %r13d,%r14d
leal (%rax,%r15,1),%eax
movl %r9d,%r12d
vmovq %xmm15,%r13
vpextrq $1,%xmm15,%r15
vpand %xmm14,%xmm11,%xmm11
vpor %xmm11,%xmm8,%xmm8
leaq -64(%rbp),%rbp
vmovdqu %xmm8,(%r15,%r13,1)
leaq 16(%r13),%r13
cmpq %rsp,%rbp
jae .Lower_avx2
movq 552(%rsp),%r15
leaq 64(%r13),%r13
movq 560(%rsp),%rsi
addl %r14d,%eax
leaq 448(%rsp),%rsp
addl 0(%r15),%eax
addl 4(%r15),%ebx
addl 8(%r15),%ecx
addl 12(%r15),%edx
addl 16(%r15),%r8d
addl 20(%r15),%r9d
addl 24(%r15),%r10d
leaq (%rsi,%r13,1),%r12
addl 28(%r15),%r11d
cmpq 64+16(%rsp),%r13
movl %eax,0(%r15)
cmoveq %rsp,%r12
movl %ebx,4(%r15)
movl %ecx,8(%r15)
movl %edx,12(%r15)
movl %r8d,16(%r15)
movl %r9d,20(%r15)
movl %r10d,24(%r15)
movl %r11d,28(%r15)
jbe .Loop_avx2
leaq (%rsp),%rbp
.cfi_escape 0x0f,0x06,0x76,0xf8,0x00,0x06,0x23,0x08
.Ldone_avx2:
movq 64+32(%rbp),%r8
movq 64+56(%rbp),%rsi
.cfi_def_cfa %rsi,8
vmovdqu %xmm8,(%r8)
vzeroall
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx2:
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha256_enc_avx2,.-aesni_cbc_sha256_enc_avx2
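// SHA-NI + AES-NI stitched variant. The .byte sequences below are hand
// encodings of the SHA extension instructions (sha256rnds2, sha256msg1,
// sha256msg2) plus a few pshufb/palignr forms, kept for assemblers that
// lack the native mnemonics.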
.type aesni_cbc_sha256_enc_shaext,@function
.align 32
aesni_cbc_sha256_enc_shaext:
.cfi_startproc
movq 8(%rsp),%r10
leaq K256+128(%rip),%rax
movdqu (%r9),%xmm1
movdqu 16(%r9),%xmm2
movdqa 512-128(%rax),%xmm3
movl 240(%rcx),%r11d
subq %rdi,%rsi
movups (%rcx),%xmm15
movups (%r8),%xmm6
movups 16(%rcx),%xmm4
leaq 112(%rcx),%rcx
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm3,%xmm7
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp .Loop_shaext
.align 16
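// Each iteration hashes one 64-byte block (round constants come from K256
// via %rax) while CBC-encrypting four 16-byte blocks with the AES round
// keys addressed off %rcx; %rdx counts the remaining 64-byte chunks.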
.Loop_shaext:
movdqu (%r10),%xmm10
movdqu 16(%r10),%xmm11
movdqu 32(%r10),%xmm12
.byte 102,68,15,56,0,211
movdqu 48(%r10),%xmm13
movdqa 0-128(%rax),%xmm0
paddd %xmm10,%xmm0
.byte 102,68,15,56,0,219
movdqa %xmm2,%xmm9
movdqa %xmm1,%xmm8
movups 0(%rdi),%xmm14
xorps %xmm15,%xmm14
xorps %xmm14,%xmm6
movups -80(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movups -64(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,202
movdqa 32-128(%rax),%xmm0
paddd %xmm11,%xmm0
.byte 102,68,15,56,0,227
leaq 64(%r10),%r10
movups -48(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movups -32(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,202
movdqa 64-128(%rax),%xmm0
paddd %xmm12,%xmm0
.byte 102,68,15,56,0,235
.byte 69,15,56,204,211
movups -16(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm13,%xmm3
.byte 102,65,15,58,15,220,4
paddd %xmm3,%xmm10
movups 0(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,202
movdqa 96-128(%rax),%xmm0
paddd %xmm13,%xmm0
.byte 69,15,56,205,213
.byte 69,15,56,204,220
movups 16(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movups 32(%rcx),%xmm4
aesenc %xmm5,%xmm6
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,221,4
paddd %xmm3,%xmm11
.byte 15,56,203,202
movdqa 128-128(%rax),%xmm0
paddd %xmm10,%xmm0
.byte 69,15,56,205,218
.byte 69,15,56,204,229
movups 48(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
paddd %xmm3,%xmm12
cmpl $11,%r11d
jb .Laesenclast1
movups 64(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 80(%rcx),%xmm5
aesenc %xmm4,%xmm6
je .Laesenclast1
movups 96(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 112(%rcx),%xmm5
aesenc %xmm4,%xmm6
.Laesenclast1:
aesenclast %xmm5,%xmm6
movups 16-112(%rcx),%xmm4
nop
.byte 15,56,203,202
movups 16(%rdi),%xmm14
xorps %xmm15,%xmm14
movups %xmm6,0(%rsi,%rdi,1)
xorps %xmm14,%xmm6
movups -80(%rcx),%xmm5
aesenc %xmm4,%xmm6
movdqa 160-128(%rax),%xmm0
paddd %xmm11,%xmm0
.byte 69,15,56,205,227
.byte 69,15,56,204,234
movups -64(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm12,%xmm3
.byte 102,65,15,58,15,219,4
paddd %xmm3,%xmm13
movups -48(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movdqa 192-128(%rax),%xmm0
paddd %xmm12,%xmm0
.byte 69,15,56,205,236
.byte 69,15,56,204,211
movups -32(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm13,%xmm3
.byte 102,65,15,58,15,220,4
paddd %xmm3,%xmm10
movups -16(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movdqa 224-128(%rax),%xmm0
paddd %xmm13,%xmm0
.byte 69,15,56,205,213
.byte 69,15,56,204,220
movups 0(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,221,4
paddd %xmm3,%xmm11
movups 16(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movdqa 256-128(%rax),%xmm0
paddd %xmm10,%xmm0
.byte 69,15,56,205,218
.byte 69,15,56,204,229
movups 32(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
paddd %xmm3,%xmm12
movups 48(%rcx),%xmm5
aesenc %xmm4,%xmm6
cmpl $11,%r11d
jb .Laesenclast2
movups 64(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 80(%rcx),%xmm5
aesenc %xmm4,%xmm6
je .Laesenclast2
movups 96(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 112(%rcx),%xmm5
aesenc %xmm4,%xmm6
.Laesenclast2:
aesenclast %xmm5,%xmm6
movups 16-112(%rcx),%xmm4
nop
.byte 15,56,203,202
movups 32(%rdi),%xmm14
xorps %xmm15,%xmm14
movups %xmm6,16(%rsi,%rdi,1)
xorps %xmm14,%xmm6
movups -80(%rcx),%xmm5
aesenc %xmm4,%xmm6
movdqa 288-128(%rax),%xmm0
paddd %xmm11,%xmm0
.byte 69,15,56,205,227
.byte 69,15,56,204,234
movups -64(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm12,%xmm3
.byte 102,65,15,58,15,219,4
paddd %xmm3,%xmm13
movups -48(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movdqa 320-128(%rax),%xmm0
paddd %xmm12,%xmm0
.byte 69,15,56,205,236
.byte 69,15,56,204,211
movups -32(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm13,%xmm3
.byte 102,65,15,58,15,220,4
paddd %xmm3,%xmm10
movups -16(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movdqa 352-128(%rax),%xmm0
paddd %xmm13,%xmm0
.byte 69,15,56,205,213
.byte 69,15,56,204,220
movups 0(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,221,4
paddd %xmm3,%xmm11
movups 16(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movdqa 384-128(%rax),%xmm0
paddd %xmm10,%xmm0
.byte 69,15,56,205,218
.byte 69,15,56,204,229
movups 32(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
paddd %xmm3,%xmm12
movups 48(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movdqa 416-128(%rax),%xmm0
paddd %xmm11,%xmm0
.byte 69,15,56,205,227
.byte 69,15,56,204,234
cmpl $11,%r11d
jb .Laesenclast3
movups 64(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 80(%rcx),%xmm5
aesenc %xmm4,%xmm6
je .Laesenclast3
movups 96(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 112(%rcx),%xmm5
aesenc %xmm4,%xmm6
.Laesenclast3:
aesenclast %xmm5,%xmm6
movups 16-112(%rcx),%xmm4
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm12,%xmm3
.byte 102,65,15,58,15,219,4
paddd %xmm3,%xmm13
movups 48(%rdi),%xmm14
xorps %xmm15,%xmm14
movups %xmm6,32(%rsi,%rdi,1)
xorps %xmm14,%xmm6
movups -80(%rcx),%xmm5
aesenc %xmm4,%xmm6
movups -64(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,202
movdqa 448-128(%rax),%xmm0
paddd %xmm12,%xmm0
.byte 69,15,56,205,236
movdqa %xmm7,%xmm3
movups -48(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movups -32(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,202
movdqa 480-128(%rax),%xmm0
paddd %xmm13,%xmm0
movups -16(%rcx),%xmm5
aesenc %xmm4,%xmm6
movups 0(%rcx),%xmm4
aesenc %xmm5,%xmm6
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movups 16(%rcx),%xmm5
aesenc %xmm4,%xmm6
.byte 15,56,203,202
movups 32(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 48(%rcx),%xmm5
aesenc %xmm4,%xmm6
cmpl $11,%r11d
jb .Laesenclast4
movups 64(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 80(%rcx),%xmm5
aesenc %xmm4,%xmm6
je .Laesenclast4
movups 96(%rcx),%xmm4
aesenc %xmm5,%xmm6
movups 112(%rcx),%xmm5
aesenc %xmm4,%xmm6
.Laesenclast4:
aesenclast %xmm5,%xmm6
movups 16-112(%rcx),%xmm4
nop
paddd %xmm9,%xmm2
paddd %xmm8,%xmm1
decq %rdx
movups %xmm6,48(%rsi,%rdi,1)
leaq 64(%rdi),%rdi
jnz .Loop_shaext
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm3
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,211,8
movups %xmm6,(%r8)
movdqu %xmm1,(%r9)
movdqu %xmm2,16(%r9)
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext
#endif
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
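// Constants for AES-GCM-SIV: one..eight are little-endian counter
// increments, OR_MASK forces the top bit of a counter block on, poly is
// the POLYVAL reduction constant, and mask/con1/con2/con3/and_mask feed
// the key-schedule and KDF routines below.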
.section .rodata
.align 16
one:
.quad 1,0
two:
.quad 2,0
three:
.quad 3,0
four:
.quad 4,0
five:
.quad 5,0
six:
.quad 6,0
seven:
.quad 7,0
eight:
.quad 8,0
OR_MASK:
.long 0x00000000,0x00000000,0x00000000,0x80000000
poly:
.quad 0x1, 0xc200000000000000
mask:
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
con1:
.long 1,1,1,1
con2:
.long 0x1b,0x1b,0x1b,0x1b
con3:
.byte -1,-1,-1,-1,-1,-1,-1,-1,4,5,6,7,4,5,6,7
and_mask:
.long 0,0xffffffff, 0xffffffff, 0xffffffff
.text
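// GFMUL: multiply %xmm0 by %xmm1 in GF(2^128) using vpclmulqdq and reduce
// modulo the POLYVAL polynomial (the poly constant). The product is
// returned in %xmm0; %xmm2-%xmm5 are clobbered.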
.type GFMUL,@function
.align 16
GFMUL:
.cfi_startproc
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm5
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $8,%xmm3,%xmm4
vpsrldq $8,%xmm3,%xmm3
vpxor %xmm4,%xmm2,%xmm2
vpxor %xmm3,%xmm5,%xmm5
vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3
vpshufd $78,%xmm2,%xmm4
vpxor %xmm4,%xmm3,%xmm2
vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3
vpshufd $78,%xmm2,%xmm4
vpxor %xmm4,%xmm3,%xmm2
vpxor %xmm5,%xmm2,%xmm0
.byte 0xf3,0xc3
.cfi_endproc
.size GFMUL, .-GFMUL
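// Precompute powers of the hash key at (%rsi) by repeated GFMUL calls,
// storing H^1..H^8 at (%rdi), 16 bytes apart.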
.globl aesgcmsiv_htable_init
.hidden aesgcmsiv_htable_init
.type aesgcmsiv_htable_init,@function
.align 16
aesgcmsiv_htable_init:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rsi),%xmm0
vmovdqa %xmm0,%xmm1
vmovdqa %xmm0,(%rdi)
call GFMUL
vmovdqa %xmm0,16(%rdi)
call GFMUL
vmovdqa %xmm0,32(%rdi)
call GFMUL
vmovdqa %xmm0,48(%rdi)
call GFMUL
vmovdqa %xmm0,64(%rdi)
call GFMUL
vmovdqa %xmm0,80(%rdi)
call GFMUL
vmovdqa %xmm0,96(%rdi)
call GFMUL
vmovdqa %xmm0,112(%rdi)
.byte 0xf3,0xc3
.cfi_endproc
.size aesgcmsiv_htable_init, .-aesgcmsiv_htable_init
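// Same as aesgcmsiv_htable_init, but only the first six powers H^1..H^6.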
.globl aesgcmsiv_htable6_init
.hidden aesgcmsiv_htable6_init
.type aesgcmsiv_htable6_init,@function
.align 16
aesgcmsiv_htable6_init:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rsi),%xmm0
vmovdqa %xmm0,%xmm1
vmovdqa %xmm0,(%rdi)
call GFMUL
vmovdqa %xmm0,16(%rdi)
call GFMUL
vmovdqa %xmm0,32(%rdi)
call GFMUL
vmovdqa %xmm0,48(%rdi)
call GFMUL
vmovdqa %xmm0,64(%rdi)
call GFMUL
vmovdqa %xmm0,80(%rdi)
.byte 0xf3,0xc3
.cfi_endproc
.size aesgcmsiv_htable6_init, .-aesgcmsiv_htable6_init
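// POLYVAL of %rdx bytes at (%rsi) using the power table at (%rdi),
// accumulated into the 16-byte state at (%rcx). Any sub-128-byte prefix
// is folded first; the main loop then consumes 128 bytes per iteration
// with a deferred reduction.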
.globl aesgcmsiv_htable_polyval
.hidden aesgcmsiv_htable_polyval
.type aesgcmsiv_htable_polyval,@function
.align 16
aesgcmsiv_htable_polyval:
.cfi_startproc
_CET_ENDBR
testq %rdx,%rdx
jnz .Lhtable_polyval_start
.byte 0xf3,0xc3
.Lhtable_polyval_start:
vzeroall
movq %rdx,%r11
andq $127,%r11
jz .Lhtable_polyval_no_prefix
vpxor %xmm9,%xmm9,%xmm9
vmovdqa (%rcx),%xmm1
subq %r11,%rdx
subq $16,%r11
vmovdqu (%rsi),%xmm0
vpxor %xmm1,%xmm0,%xmm0
vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm5
vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm3
vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm4
vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
leaq 16(%rsi),%rsi
testq %r11,%r11
jnz .Lhtable_polyval_prefix_loop
jmp .Lhtable_polyval_prefix_complete
.align 64
.Lhtable_polyval_prefix_loop:
subq $16,%r11
vmovdqu (%rsi),%xmm0
vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
testq %r11,%r11
leaq 16(%rsi),%rsi
jnz .Lhtable_polyval_prefix_loop
.Lhtable_polyval_prefix_complete:
vpsrldq $8,%xmm5,%xmm6
vpslldq $8,%xmm5,%xmm5
vpxor %xmm6,%xmm4,%xmm9
vpxor %xmm5,%xmm3,%xmm1
jmp .Lhtable_polyval_main_loop
.Lhtable_polyval_no_prefix:
vpxor %xmm1,%xmm1,%xmm1
vmovdqa (%rcx),%xmm9
.align 64
.Lhtable_polyval_main_loop:
subq $0x80,%rdx
jb .Lhtable_polyval_out
vmovdqu 112(%rsi),%xmm0
vpclmulqdq $0x01,(%rdi),%xmm0,%xmm5
vpclmulqdq $0x00,(%rdi),%xmm0,%xmm3
vpclmulqdq $0x11,(%rdi),%xmm0,%xmm4
vpclmulqdq $0x10,(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vmovdqu 96(%rsi),%xmm0
vpclmulqdq $0x01,16(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x00,16(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,16(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x10,16(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vmovdqu 80(%rsi),%xmm0
vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7
vpalignr $8,%xmm1,%xmm1,%xmm1
vpclmulqdq $0x01,32(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x00,32(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,32(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x10,32(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpxor %xmm7,%xmm1,%xmm1
vmovdqu 64(%rsi),%xmm0
vpclmulqdq $0x01,48(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x00,48(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,48(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x10,48(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vmovdqu 48(%rsi),%xmm0
vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7
vpalignr $8,%xmm1,%xmm1,%xmm1
vpclmulqdq $0x01,64(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x00,64(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,64(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x10,64(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpxor %xmm7,%xmm1,%xmm1
vmovdqu 32(%rsi),%xmm0
vpclmulqdq $0x01,80(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x00,80(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,80(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x10,80(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpxor %xmm9,%xmm1,%xmm1
vmovdqu 16(%rsi),%xmm0
vpclmulqdq $0x01,96(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x00,96(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,96(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x10,96(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vmovdqu 0(%rsi),%xmm0
vpxor %xmm1,%xmm0,%xmm0
vpclmulqdq $0x01,112(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x00,112(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm3,%xmm3
vpclmulqdq $0x11,112(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm4,%xmm4
vpclmulqdq $0x10,112(%rdi),%xmm0,%xmm6
vpxor %xmm6,%xmm5,%xmm5
vpsrldq $8,%xmm5,%xmm6
vpslldq $8,%xmm5,%xmm5
vpxor %xmm6,%xmm4,%xmm9
vpxor %xmm5,%xmm3,%xmm1
leaq 128(%rsi),%rsi
jmp .Lhtable_polyval_main_loop
.Lhtable_polyval_out:
vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6
vpalignr $8,%xmm1,%xmm1,%xmm1
vpxor %xmm6,%xmm1,%xmm1
vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6
vpalignr $8,%xmm1,%xmm1,%xmm1
vpxor %xmm6,%xmm1,%xmm1
vpxor %xmm9,%xmm1,%xmm1
vmovdqu %xmm1,(%rcx)
vzeroupper
.byte 0xf3,0xc3
.cfi_endproc
.size aesgcmsiv_htable_polyval,.-aesgcmsiv_htable_polyval
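// Horner-style POLYVAL: fold %rcx 16-byte blocks from (%rdx) into the
// accumulator at (%rdi), multiplying by the single key at (%rsi) via
// GFMUL after each block.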
.globl aesgcmsiv_polyval_horner
.hidden aesgcmsiv_polyval_horner
.type aesgcmsiv_polyval_horner,@function
.align 16
aesgcmsiv_polyval_horner:
.cfi_startproc
_CET_ENDBR
testq %rcx,%rcx
jnz .Lpolyval_horner_start
.byte 0xf3,0xc3
.Lpolyval_horner_start:
xorq %r10,%r10
shlq $4,%rcx
vmovdqa (%rsi),%xmm1
vmovdqa (%rdi),%xmm0
.Lpolyval_horner_loop:
vpxor (%rdx,%r10,1),%xmm0,%xmm0
call GFMUL
addq $16,%r10
cmpq %r10,%rcx
jne .Lpolyval_horner_loop
vmovdqa %xmm0,(%rdi)
.byte 0xf3,0xc3
.cfi_endproc
.size aesgcmsiv_polyval_horner,.-aesgcmsiv_polyval_horner
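// Expand the 16-byte AES-128 key at (%rdi) into the 11 round keys at
// (%rsi), deriving the round constants with vaesenclast rather than
// aeskeygenassist.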
.globl aes128gcmsiv_aes_ks
.hidden aes128gcmsiv_aes_ks
.type aes128gcmsiv_aes_ks,@function
.align 16
aes128gcmsiv_aes_ks:
.cfi_startproc
_CET_ENDBR
vmovdqu (%rdi),%xmm1
vmovdqa %xmm1,(%rsi)
vmovdqa con1(%rip),%xmm0
vmovdqa mask(%rip),%xmm15
movq $8,%rax
.Lks128_loop:
addq $16,%rsi
subq $1,%rax
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpslldq $4,%xmm3,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpslldq $4,%xmm3,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm1,(%rsi)
jne .Lks128_loop
vmovdqa con2(%rip),%xmm0
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpslldq $4,%xmm3,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpslldq $4,%xmm3,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm1,16(%rsi)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslldq $4,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpslldq $4,%xmm3,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpslldq $4,%xmm3,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm1,32(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
.size aes128gcmsiv_aes_ks,.-aes128gcmsiv_aes_ks
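// AES-256 key schedule: expand the 32-byte key at (%rdi) into 15 round
// keys at (%rsi) using the same vaesenclast-based scheme.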
.globl aes256gcmsiv_aes_ks
.hidden aes256gcmsiv_aes_ks
.type aes256gcmsiv_aes_ks,@function
.align 16
aes256gcmsiv_aes_ks:
.cfi_startproc
_CET_ENDBR
vmovdqu (%rdi),%xmm1
vmovdqu 16(%rdi),%xmm3
vmovdqa %xmm1,(%rsi)
vmovdqa %xmm3,16(%rsi)
vmovdqa con1(%rip),%xmm0
vmovdqa mask(%rip),%xmm15
vpxor %xmm14,%xmm14,%xmm14
movq $6,%rax
.Lks256_loop:
addq $32,%rsi
subq $1,%rax
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm1,(%rsi)
vpshufd $0xff,%xmm1,%xmm2
vaesenclast %xmm14,%xmm2,%xmm2
vpsllq $32,%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpshufb con3(%rip),%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpxor %xmm2,%xmm3,%xmm3
vmovdqa %xmm3,16(%rsi)
jne .Lks256_loop
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpsllq $32,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm1,32(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
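// Expand the AES-128 key at (%rcx) into the round keys at (%rdx) and, in
// the same pass, encrypt the single block at (%rdi), writing the result
// to (%rsi).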
.globl aes128gcmsiv_aes_ks_enc_x1
.hidden aes128gcmsiv_aes_ks_enc_x1
.type aes128gcmsiv_aes_ks_enc_x1,@function
.align 16
aes128gcmsiv_aes_ks_enc_x1:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rcx),%xmm1
vmovdqa 0(%rdi),%xmm4
vmovdqa %xmm1,(%rdx)
vpxor %xmm1,%xmm4,%xmm4
vmovdqa con1(%rip),%xmm0
vmovdqa mask(%rip),%xmm15
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,16(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,32(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,48(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,64(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,80(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,96(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,112(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,128(%rdx)
vmovdqa con2(%rip),%xmm0
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,144(%rdx)
vpshufb %xmm15,%xmm1,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpsllq $32,%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpshufb con3(%rip),%xmm1,%xmm3
vpxor %xmm3,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenclast %xmm1,%xmm4,%xmm4
vmovdqa %xmm1,160(%rdx)
vmovdqa %xmm4,0(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
.size aes128gcmsiv_aes_ks_enc_x1,.-aes128gcmsiv_aes_ks_enc_x1
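// AES-GCM-SIV KDF: encrypt four consecutive counter blocks built from the
// nonce at (%rdi) under the round keys at (%rdx), producing 64 bytes of
// key material at (%rsi).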
.globl aes128gcmsiv_kdf
.hidden aes128gcmsiv_kdf
.type aes128gcmsiv_kdf,@function
.align 16
aes128gcmsiv_kdf:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rdx),%xmm1
vmovdqa 0(%rdi),%xmm9
vmovdqa and_mask(%rip),%xmm12
vmovdqa one(%rip),%xmm13
vpshufd $0x90,%xmm9,%xmm9
vpand %xmm12,%xmm9,%xmm9
vpaddd %xmm13,%xmm9,%xmm10
vpaddd %xmm13,%xmm10,%xmm11
vpaddd %xmm13,%xmm11,%xmm12
vpxor %xmm1,%xmm9,%xmm9
vpxor %xmm1,%xmm10,%xmm10
vpxor %xmm1,%xmm11,%xmm11
vpxor %xmm1,%xmm12,%xmm12
vmovdqa 16(%rdx),%xmm1
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vmovdqa 32(%rdx),%xmm2
vaesenc %xmm2,%xmm9,%xmm9
vaesenc %xmm2,%xmm10,%xmm10
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vmovdqa 48(%rdx),%xmm1
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vmovdqa 64(%rdx),%xmm2
vaesenc %xmm2,%xmm9,%xmm9
vaesenc %xmm2,%xmm10,%xmm10
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vmovdqa 80(%rdx),%xmm1
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vmovdqa 96(%rdx),%xmm2
vaesenc %xmm2,%xmm9,%xmm9
vaesenc %xmm2,%xmm10,%xmm10
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vmovdqa 112(%rdx),%xmm1
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vmovdqa 128(%rdx),%xmm2
vaesenc %xmm2,%xmm9,%xmm9
vaesenc %xmm2,%xmm10,%xmm10
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vmovdqa 144(%rdx),%xmm1
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vmovdqa 160(%rdx),%xmm2
vaesenclast %xmm2,%xmm9,%xmm9
vaesenclast %xmm2,%xmm10,%xmm10
vaesenclast %xmm2,%xmm11,%xmm11
vaesenclast %xmm2,%xmm12,%xmm12
vmovdqa %xmm9,0(%rsi)
vmovdqa %xmm10,16(%rsi)
vmovdqa %xmm11,32(%rsi)
vmovdqa %xmm12,48(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
.size aes128gcmsiv_kdf,.-aes128gcmsiv_kdf
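// CTR-mode encryption, four blocks per iteration: encrypt %r8 bytes from
// (%rdi) to (%rsi) using the counter block at (%rdx) (top bit forced on
// with OR_MASK) and the AES-128 round keys at (%rcx).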
.globl aes128gcmsiv_enc_msg_x4
.hidden aes128gcmsiv_enc_msg_x4
.type aes128gcmsiv_enc_msg_x4,@function
.align 16
aes128gcmsiv_enc_msg_x4:
.cfi_startproc
_CET_ENDBR
testq %r8,%r8
jnz .L128_enc_msg_x4_start
.byte 0xf3,0xc3
.L128_enc_msg_x4_start:
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
shrq $4,%r8
movq %r8,%r10
shlq $62,%r10
shrq $62,%r10
vmovdqa (%rdx),%xmm15
vpor OR_MASK(%rip),%xmm15,%xmm15
vmovdqu four(%rip),%xmm4
vmovdqa %xmm15,%xmm0
vpaddd one(%rip),%xmm15,%xmm1
vpaddd two(%rip),%xmm15,%xmm2
vpaddd three(%rip),%xmm15,%xmm3
shrq $2,%r8
je .L128_enc_msg_x4_check_remainder
subq $64,%rsi
subq $64,%rdi
.L128_enc_msg_x4_loop1:
addq $64,%rsi
addq $64,%rdi
vmovdqa %xmm0,%xmm5
vmovdqa %xmm1,%xmm6
vmovdqa %xmm2,%xmm7
vmovdqa %xmm3,%xmm8
vpxor (%rcx),%xmm5,%xmm5
vpxor (%rcx),%xmm6,%xmm6
vpxor (%rcx),%xmm7,%xmm7
vpxor (%rcx),%xmm8,%xmm8
vmovdqu 16(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm0,%xmm0
vmovdqu 32(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm1,%xmm1
vmovdqu 48(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm2,%xmm2
vmovdqu 64(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm3,%xmm3
vmovdqu 80(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 96(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 112(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 128(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 144(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 160(%rcx),%xmm12
vaesenclast %xmm12,%xmm5,%xmm5
vaesenclast %xmm12,%xmm6,%xmm6
vaesenclast %xmm12,%xmm7,%xmm7
vaesenclast %xmm12,%xmm8,%xmm8
vpxor 0(%rdi),%xmm5,%xmm5
vpxor 16(%rdi),%xmm6,%xmm6
vpxor 32(%rdi),%xmm7,%xmm7
vpxor 48(%rdi),%xmm8,%xmm8
subq $1,%r8
vmovdqu %xmm5,0(%rsi)
vmovdqu %xmm6,16(%rsi)
vmovdqu %xmm7,32(%rsi)
vmovdqu %xmm8,48(%rsi)
jne .L128_enc_msg_x4_loop1
addq $64,%rsi
addq $64,%rdi
.L128_enc_msg_x4_check_remainder:
cmpq $0,%r10
je .L128_enc_msg_x4_out
.L128_enc_msg_x4_loop2:
vmovdqa %xmm0,%xmm5
vpaddd one(%rip),%xmm0,%xmm0
vpxor (%rcx),%xmm5,%xmm5
vaesenc 16(%rcx),%xmm5,%xmm5
vaesenc 32(%rcx),%xmm5,%xmm5
vaesenc 48(%rcx),%xmm5,%xmm5
vaesenc 64(%rcx),%xmm5,%xmm5
vaesenc 80(%rcx),%xmm5,%xmm5
vaesenc 96(%rcx),%xmm5,%xmm5
vaesenc 112(%rcx),%xmm5,%xmm5
vaesenc 128(%rcx),%xmm5,%xmm5
vaesenc 144(%rcx),%xmm5,%xmm5
vaesenclast 160(%rcx),%xmm5,%xmm5
vpxor (%rdi),%xmm5,%xmm5
vmovdqu %xmm5,(%rsi)
addq $16,%rdi
addq $16,%rsi
subq $1,%r10
jne .L128_enc_msg_x4_loop2
.L128_enc_msg_x4_out:
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
.byte 0xf3,0xc3
.cfi_endproc
.size aes128gcmsiv_enc_msg_x4,.-aes128gcmsiv_enc_msg_x4
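// As aes128gcmsiv_enc_msg_x4, but eight counter blocks per iteration;
// the eighth counter is kept in a stack slot.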
.globl aes128gcmsiv_enc_msg_x8
.hidden aes128gcmsiv_enc_msg_x8
.type aes128gcmsiv_enc_msg_x8,@function
.align 16
aes128gcmsiv_enc_msg_x8:
.cfi_startproc
_CET_ENDBR
testq %r8,%r8
jnz .L128_enc_msg_x8_start
.byte 0xf3,0xc3
.L128_enc_msg_x8_start:
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-32
movq %rsp,%rbp
.cfi_def_cfa_register rbp
subq $128,%rsp
andq $-64,%rsp
shrq $4,%r8
movq %r8,%r10
shlq $61,%r10
shrq $61,%r10
vmovdqu (%rdx),%xmm1
vpor OR_MASK(%rip),%xmm1,%xmm1
vpaddd seven(%rip),%xmm1,%xmm0
vmovdqu %xmm0,(%rsp)
vpaddd one(%rip),%xmm1,%xmm9
vpaddd two(%rip),%xmm1,%xmm10
vpaddd three(%rip),%xmm1,%xmm11
vpaddd four(%rip),%xmm1,%xmm12
vpaddd five(%rip),%xmm1,%xmm13
vpaddd six(%rip),%xmm1,%xmm14
vmovdqa %xmm1,%xmm0
shrq $3,%r8
je .L128_enc_msg_x8_check_remainder
subq $128,%rsi
subq $128,%rdi
.L128_enc_msg_x8_loop1:
addq $128,%rsi
addq $128,%rdi
vmovdqa %xmm0,%xmm1
vmovdqa %xmm9,%xmm2
vmovdqa %xmm10,%xmm3
vmovdqa %xmm11,%xmm4
vmovdqa %xmm12,%xmm5
vmovdqa %xmm13,%xmm6
vmovdqa %xmm14,%xmm7
vmovdqu (%rsp),%xmm8
vpxor (%rcx),%xmm1,%xmm1
vpxor (%rcx),%xmm2,%xmm2
vpxor (%rcx),%xmm3,%xmm3
vpxor (%rcx),%xmm4,%xmm4
vpxor (%rcx),%xmm5,%xmm5
vpxor (%rcx),%xmm6,%xmm6
vpxor (%rcx),%xmm7,%xmm7
vpxor (%rcx),%xmm8,%xmm8
vmovdqu 16(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqu (%rsp),%xmm14
vpaddd eight(%rip),%xmm14,%xmm14
vmovdqu %xmm14,(%rsp)
vmovdqu 32(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpsubd one(%rip),%xmm14,%xmm14
vmovdqu 48(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm0,%xmm0
vmovdqu 64(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm9,%xmm9
vmovdqu 80(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm10,%xmm10
vmovdqu 96(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm11,%xmm11
vmovdqu 112(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm12,%xmm12
vmovdqu 128(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm13,%xmm13
vmovdqu 144(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqu 160(%rcx),%xmm15
vaesenclast %xmm15,%xmm1,%xmm1
vaesenclast %xmm15,%xmm2,%xmm2
vaesenclast %xmm15,%xmm3,%xmm3
vaesenclast %xmm15,%xmm4,%xmm4
vaesenclast %xmm15,%xmm5,%xmm5
vaesenclast %xmm15,%xmm6,%xmm6
vaesenclast %xmm15,%xmm7,%xmm7
vaesenclast %xmm15,%xmm8,%xmm8
vpxor 0(%rdi),%xmm1,%xmm1
vpxor 16(%rdi),%xmm2,%xmm2
vpxor 32(%rdi),%xmm3,%xmm3
vpxor 48(%rdi),%xmm4,%xmm4
vpxor 64(%rdi),%xmm5,%xmm5
vpxor 80(%rdi),%xmm6,%xmm6
vpxor 96(%rdi),%xmm7,%xmm7
vpxor 112(%rdi),%xmm8,%xmm8
decq %r8
vmovdqu %xmm1,0(%rsi)
vmovdqu %xmm2,16(%rsi)
vmovdqu %xmm3,32(%rsi)
vmovdqu %xmm4,48(%rsi)
vmovdqu %xmm5,64(%rsi)
vmovdqu %xmm6,80(%rsi)
vmovdqu %xmm7,96(%rsi)
vmovdqu %xmm8,112(%rsi)
jne .L128_enc_msg_x8_loop1
addq $128,%rsi
addq $128,%rdi
.L128_enc_msg_x8_check_remainder:
cmpq $0,%r10
je .L128_enc_msg_x8_out
.L128_enc_msg_x8_loop2:
vmovdqa %xmm0,%xmm1
vpaddd one(%rip),%xmm0,%xmm0
vpxor (%rcx),%xmm1,%xmm1
vaesenc 16(%rcx),%xmm1,%xmm1
vaesenc 32(%rcx),%xmm1,%xmm1
vaesenc 48(%rcx),%xmm1,%xmm1
vaesenc 64(%rcx),%xmm1,%xmm1
vaesenc 80(%rcx),%xmm1,%xmm1
vaesenc 96(%rcx),%xmm1,%xmm1
vaesenc 112(%rcx),%xmm1,%xmm1
vaesenc 128(%rcx),%xmm1,%xmm1
vaesenc 144(%rcx),%xmm1,%xmm1
vaesenclast 160(%rcx),%xmm1,%xmm1
vpxor (%rdi),%xmm1,%xmm1
vmovdqu %xmm1,(%rsi)
addq $16,%rdi
addq $16,%rsi
decq %r10
jne .L128_enc_msg_x8_loop2
.L128_enc_msg_x8_out:
movq %rbp,%rsp
.cfi_def_cfa_register %rsp
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
.byte 0xf3,0xc3
.cfi_endproc
.size aes128gcmsiv_enc_msg_x8,.-aes128gcmsiv_enc_msg_x8
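// CTR-mode decryption of %r9 bytes (rounded down to a whole number of
// blocks) from (%rdi) to (%rsi), six blocks per main-loop iteration,
// while folding the decrypted blocks into the POLYVAL state at (%rdx)
// using the power table at (%rcx) and the round keys at (%r8).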
.globl aes128gcmsiv_dec
.hidden aes128gcmsiv_dec
.type aes128gcmsiv_dec,@function
.align 16
aes128gcmsiv_dec:
.cfi_startproc
_CET_ENDBR
testq $~15,%r9
jnz .L128_dec_start
.byte 0xf3,0xc3
.L128_dec_start:
vzeroupper
vmovdqa (%rdx),%xmm0
vmovdqu 16(%rdx),%xmm15
vpor OR_MASK(%rip),%xmm15,%xmm15
movq %rdx,%rax
leaq 32(%rax),%rax
leaq 32(%rcx),%rcx
andq $~15,%r9
cmpq $96,%r9
jb .L128_dec_loop2
subq $96,%r9
vmovdqa %xmm15,%xmm7
vpaddd one(%rip),%xmm7,%xmm8
vpaddd two(%rip),%xmm7,%xmm9
vpaddd one(%rip),%xmm9,%xmm10
vpaddd two(%rip),%xmm9,%xmm11
vpaddd one(%rip),%xmm11,%xmm12
vpaddd two(%rip),%xmm11,%xmm15
vpxor (%r8),%xmm7,%xmm7
vpxor (%r8),%xmm8,%xmm8
vpxor (%r8),%xmm9,%xmm9
vpxor (%r8),%xmm10,%xmm10
vpxor (%r8),%xmm11,%xmm11
vpxor (%r8),%xmm12,%xmm12
vmovdqu 16(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 32(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 48(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 64(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 80(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 96(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 112(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 128(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 144(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 160(%r8),%xmm4
vaesenclast %xmm4,%xmm7,%xmm7
vaesenclast %xmm4,%xmm8,%xmm8
vaesenclast %xmm4,%xmm9,%xmm9
vaesenclast %xmm4,%xmm10,%xmm10
vaesenclast %xmm4,%xmm11,%xmm11
vaesenclast %xmm4,%xmm12,%xmm12
vpxor 0(%rdi),%xmm7,%xmm7
vpxor 16(%rdi),%xmm8,%xmm8
vpxor 32(%rdi),%xmm9,%xmm9
vpxor 48(%rdi),%xmm10,%xmm10
vpxor 64(%rdi),%xmm11,%xmm11
vpxor 80(%rdi),%xmm12,%xmm12
vmovdqu %xmm7,0(%rsi)
vmovdqu %xmm8,16(%rsi)
vmovdqu %xmm9,32(%rsi)
vmovdqu %xmm10,48(%rsi)
vmovdqu %xmm11,64(%rsi)
vmovdqu %xmm12,80(%rsi)
addq $96,%rdi
addq $96,%rsi
jmp .L128_dec_loop1
.align 64
.L128_dec_loop1:
cmpq $96,%r9
jb .L128_dec_finish_96
subq $96,%r9
vmovdqa %xmm12,%xmm6
vmovdqa %xmm11,16-32(%rax)
vmovdqa %xmm10,32-32(%rax)
vmovdqa %xmm9,48-32(%rax)
vmovdqa %xmm8,64-32(%rax)
vmovdqa %xmm7,80-32(%rax)
vmovdqa %xmm15,%xmm7
vpaddd one(%rip),%xmm7,%xmm8
vpaddd two(%rip),%xmm7,%xmm9
vpaddd one(%rip),%xmm9,%xmm10
vpaddd two(%rip),%xmm9,%xmm11
vpaddd one(%rip),%xmm11,%xmm12
vpaddd two(%rip),%xmm11,%xmm15
vmovdqa (%r8),%xmm4
vpxor %xmm4,%xmm7,%xmm7
vpxor %xmm4,%xmm8,%xmm8
vpxor %xmm4,%xmm9,%xmm9
vpxor %xmm4,%xmm10,%xmm10
vpxor %xmm4,%xmm11,%xmm11
vpxor %xmm4,%xmm12,%xmm12
vmovdqu 0-32(%rcx),%xmm4
vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3
vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1
vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 16(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu -16(%rax),%xmm6
vmovdqu -16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 32(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 0(%rax),%xmm6
vmovdqu 0(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 48(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 16(%rax),%xmm6
vmovdqu 16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 64(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 32(%rax),%xmm6
vmovdqu 32(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 80(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 96(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 112(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqa 80-32(%rax),%xmm6
vpxor %xmm0,%xmm6,%xmm6
vmovdqu 80-32(%rcx),%xmm5
vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 128(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vpsrldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm2,%xmm5
vpslldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm0
vmovdqa poly(%rip),%xmm3
vmovdqu 144(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 160(%r8),%xmm6
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vpxor 0(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm7,%xmm7
vpxor 16(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm8,%xmm8
vpxor 32(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm9,%xmm9
vpxor 48(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm10,%xmm10
vpxor 64(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm11,%xmm11
vpxor 80(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm12,%xmm12
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vmovdqu %xmm7,0(%rsi)
vmovdqu %xmm8,16(%rsi)
vmovdqu %xmm9,32(%rsi)
vmovdqu %xmm10,48(%rsi)
vmovdqu %xmm11,64(%rsi)
vmovdqu %xmm12,80(%rsi)
vpxor %xmm5,%xmm0,%xmm0
leaq 96(%rdi),%rdi
leaq 96(%rsi),%rsi
jmp .L128_dec_loop1
.L128_dec_finish_96:
vmovdqa %xmm12,%xmm6
vmovdqa %xmm11,16-32(%rax)
vmovdqa %xmm10,32-32(%rax)
vmovdqa %xmm9,48-32(%rax)
vmovdqa %xmm8,64-32(%rax)
vmovdqa %xmm7,80-32(%rax)
vmovdqu 0-32(%rcx),%xmm4
vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1
vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3
vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu -16(%rax),%xmm6
vmovdqu -16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 0(%rax),%xmm6
vmovdqu 0(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 16(%rax),%xmm6
vmovdqu 16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 32(%rax),%xmm6
vmovdqu 32(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 80-32(%rax),%xmm6
vpxor %xmm0,%xmm6,%xmm6
vmovdqu 80-32(%rcx),%xmm5
vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpsrldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm2,%xmm5
vpslldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm0
vmovdqa poly(%rip),%xmm3
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vpxor %xmm5,%xmm0,%xmm0
.L128_dec_loop2:
cmpq $16,%r9
jb .L128_dec_out
subq $16,%r9
vmovdqa %xmm15,%xmm2
vpaddd one(%rip),%xmm15,%xmm15
vpxor 0(%r8),%xmm2,%xmm2
vaesenc 16(%r8),%xmm2,%xmm2
vaesenc 32(%r8),%xmm2,%xmm2
vaesenc 48(%r8),%xmm2,%xmm2
vaesenc 64(%r8),%xmm2,%xmm2
vaesenc 80(%r8),%xmm2,%xmm2
vaesenc 96(%r8),%xmm2,%xmm2
vaesenc 112(%r8),%xmm2,%xmm2
vaesenc 128(%r8),%xmm2,%xmm2
vaesenc 144(%r8),%xmm2,%xmm2
vaesenclast 160(%r8),%xmm2,%xmm2
vpxor (%rdi),%xmm2,%xmm2
vmovdqu %xmm2,(%rsi)
addq $16,%rdi
addq $16,%rsi
vpxor %xmm2,%xmm0,%xmm0
vmovdqa -32(%rcx),%xmm1
call GFMUL
jmp .L128_dec_loop2
.L128_dec_out:
vmovdqu %xmm0,(%rdx)
.byte 0xf3,0xc3
.cfi_endproc
.size aes128gcmsiv_dec, .-aes128gcmsiv_dec
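// aes128gcmsiv_ecb_enc_block: encrypt the single 16-byte block at %rdi with
// the expanded AES-128 key at %rdx (10 rounds) and store the result at %rsi.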
.globl aes128gcmsiv_ecb_enc_block
.hidden aes128gcmsiv_ecb_enc_block
.type aes128gcmsiv_ecb_enc_block,@function
.align 16
aes128gcmsiv_ecb_enc_block:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rdi),%xmm1
vpxor (%rdx),%xmm1,%xmm1
vaesenc 16(%rdx),%xmm1,%xmm1
vaesenc 32(%rdx),%xmm1,%xmm1
vaesenc 48(%rdx),%xmm1,%xmm1
vaesenc 64(%rdx),%xmm1,%xmm1
vaesenc 80(%rdx),%xmm1,%xmm1
vaesenc 96(%rdx),%xmm1,%xmm1
vaesenc 112(%rdx),%xmm1,%xmm1
vaesenc 128(%rdx),%xmm1,%xmm1
vaesenc 144(%rdx),%xmm1,%xmm1
vaesenclast 160(%rdx),%xmm1,%xmm1
vmovdqa %xmm1,(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
.size aes128gcmsiv_ecb_enc_block,.-aes128gcmsiv_ecb_enc_block
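// aes256gcmsiv_aes_ks_enc_x1: expand the 256-bit key at %rcx into the 15 round
// keys written to %rdx, encrypting the block at %rdi into %rsi as the schedule
// is produced (key expansion and encryption run in lockstep).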
.globl aes256gcmsiv_aes_ks_enc_x1
.hidden aes256gcmsiv_aes_ks_enc_x1
.type aes256gcmsiv_aes_ks_enc_x1,@function
.align 16
aes256gcmsiv_aes_ks_enc_x1:
.cfi_startproc
_CET_ENDBR
vmovdqa con1(%rip),%xmm0
vmovdqa mask(%rip),%xmm15
vmovdqa (%rdi),%xmm8
vmovdqa (%rcx),%xmm1
vmovdqa 16(%rcx),%xmm3
vpxor %xmm1,%xmm8,%xmm8
vaesenc %xmm3,%xmm8,%xmm8
vmovdqu %xmm1,(%rdx)
vmovdqu %xmm3,16(%rdx)
vpxor %xmm14,%xmm14,%xmm14
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm8,%xmm8
vmovdqu %xmm1,32(%rdx)
vpshufd $0xff,%xmm1,%xmm2
vaesenclast %xmm14,%xmm2,%xmm2
vpslldq $4,%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpxor %xmm2,%xmm3,%xmm3
vaesenc %xmm3,%xmm8,%xmm8
vmovdqu %xmm3,48(%rdx)
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm8,%xmm8
vmovdqu %xmm1,64(%rdx)
vpshufd $0xff,%xmm1,%xmm2
vaesenclast %xmm14,%xmm2,%xmm2
vpslldq $4,%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpxor %xmm2,%xmm3,%xmm3
vaesenc %xmm3,%xmm8,%xmm8
vmovdqu %xmm3,80(%rdx)
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm8,%xmm8
vmovdqu %xmm1,96(%rdx)
vpshufd $0xff,%xmm1,%xmm2
vaesenclast %xmm14,%xmm2,%xmm2
vpslldq $4,%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpxor %xmm2,%xmm3,%xmm3
vaesenc %xmm3,%xmm8,%xmm8
vmovdqu %xmm3,112(%rdx)
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm8,%xmm8
vmovdqu %xmm1,128(%rdx)
vpshufd $0xff,%xmm1,%xmm2
vaesenclast %xmm14,%xmm2,%xmm2
vpslldq $4,%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpxor %xmm2,%xmm3,%xmm3
vaesenc %xmm3,%xmm8,%xmm8
vmovdqu %xmm3,144(%rdx)
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm8,%xmm8
vmovdqu %xmm1,160(%rdx)
vpshufd $0xff,%xmm1,%xmm2
vaesenclast %xmm14,%xmm2,%xmm2
vpslldq $4,%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpxor %xmm2,%xmm3,%xmm3
vaesenc %xmm3,%xmm8,%xmm8
vmovdqu %xmm3,176(%rdx)
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslld $1,%xmm0,%xmm0
vpslldq $4,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenc %xmm1,%xmm8,%xmm8
vmovdqu %xmm1,192(%rdx)
vpshufd $0xff,%xmm1,%xmm2
vaesenclast %xmm14,%xmm2,%xmm2
vpslldq $4,%xmm3,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpxor %xmm2,%xmm3,%xmm3
vaesenc %xmm3,%xmm8,%xmm8
vmovdqu %xmm3,208(%rdx)
vpshufb %xmm15,%xmm3,%xmm2
vaesenclast %xmm0,%xmm2,%xmm2
vpslldq $4,%xmm1,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpslldq $4,%xmm4,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vaesenclast %xmm1,%xmm8,%xmm8
vmovdqu %xmm1,224(%rdx)
vmovdqa %xmm8,(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
.size aes256gcmsiv_aes_ks_enc_x1,.-aes256gcmsiv_aes_ks_enc_x1
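// aes256gcmsiv_ecb_enc_block: single-block AES-256 encryption (14 rounds) with
// the key schedule at %rdx; input block at %rdi, output at %rsi.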
.globl aes256gcmsiv_ecb_enc_block
.hidden aes256gcmsiv_ecb_enc_block
.type aes256gcmsiv_ecb_enc_block,@function
.align 16
aes256gcmsiv_ecb_enc_block:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rdi),%xmm1
vpxor (%rdx),%xmm1,%xmm1
vaesenc 16(%rdx),%xmm1,%xmm1
vaesenc 32(%rdx),%xmm1,%xmm1
vaesenc 48(%rdx),%xmm1,%xmm1
vaesenc 64(%rdx),%xmm1,%xmm1
vaesenc 80(%rdx),%xmm1,%xmm1
vaesenc 96(%rdx),%xmm1,%xmm1
vaesenc 112(%rdx),%xmm1,%xmm1
vaesenc 128(%rdx),%xmm1,%xmm1
vaesenc 144(%rdx),%xmm1,%xmm1
vaesenc 160(%rdx),%xmm1,%xmm1
vaesenc 176(%rdx),%xmm1,%xmm1
vaesenc 192(%rdx),%xmm1,%xmm1
vaesenc 208(%rdx),%xmm1,%xmm1
vaesenclast 224(%rdx),%xmm1,%xmm1
vmovdqa %xmm1,(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
.size aes256gcmsiv_ecb_enc_block,.-aes256gcmsiv_ecb_enc_block
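// aes256gcmsiv_enc_msg_x4: AES-256 counter-mode encryption, four blocks per
// 64-byte iteration. Counter blocks start from the 16-byte block at %rdx with
// its top bit forced by OR_MASK; leftover blocks are processed one at a time.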
.globl aes256gcmsiv_enc_msg_x4
.hidden aes256gcmsiv_enc_msg_x4
.type aes256gcmsiv_enc_msg_x4,@function
.align 16
aes256gcmsiv_enc_msg_x4:
.cfi_startproc
_CET_ENDBR
testq %r8,%r8
jnz .L256_enc_msg_x4_start
.byte 0xf3,0xc3
.L256_enc_msg_x4_start:
movq %r8,%r10
shrq $4,%r8
shlq $60,%r10
jz .L256_enc_msg_x4_start2
addq $1,%r8
.L256_enc_msg_x4_start2:
movq %r8,%r10
shlq $62,%r10
shrq $62,%r10
vmovdqa (%rdx),%xmm15
vpor OR_MASK(%rip),%xmm15,%xmm15
vmovdqa four(%rip),%xmm4
vmovdqa %xmm15,%xmm0
vpaddd one(%rip),%xmm15,%xmm1
vpaddd two(%rip),%xmm15,%xmm2
vpaddd three(%rip),%xmm15,%xmm3
shrq $2,%r8
je .L256_enc_msg_x4_check_remainder
subq $64,%rsi
subq $64,%rdi
.L256_enc_msg_x4_loop1:
addq $64,%rsi
addq $64,%rdi
vmovdqa %xmm0,%xmm5
vmovdqa %xmm1,%xmm6
vmovdqa %xmm2,%xmm7
vmovdqa %xmm3,%xmm8
vpxor (%rcx),%xmm5,%xmm5
vpxor (%rcx),%xmm6,%xmm6
vpxor (%rcx),%xmm7,%xmm7
vpxor (%rcx),%xmm8,%xmm8
vmovdqu 16(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm0,%xmm0
vmovdqu 32(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm1,%xmm1
vmovdqu 48(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm2,%xmm2
vmovdqu 64(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vpaddd %xmm4,%xmm3,%xmm3
vmovdqu 80(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 96(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 112(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 128(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 144(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 160(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 176(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 192(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 208(%rcx),%xmm12
vaesenc %xmm12,%xmm5,%xmm5
vaesenc %xmm12,%xmm6,%xmm6
vaesenc %xmm12,%xmm7,%xmm7
vaesenc %xmm12,%xmm8,%xmm8
vmovdqu 224(%rcx),%xmm12
vaesenclast %xmm12,%xmm5,%xmm5
vaesenclast %xmm12,%xmm6,%xmm6
vaesenclast %xmm12,%xmm7,%xmm7
vaesenclast %xmm12,%xmm8,%xmm8
vpxor 0(%rdi),%xmm5,%xmm5
vpxor 16(%rdi),%xmm6,%xmm6
vpxor 32(%rdi),%xmm7,%xmm7
vpxor 48(%rdi),%xmm8,%xmm8
subq $1,%r8
vmovdqu %xmm5,0(%rsi)
vmovdqu %xmm6,16(%rsi)
vmovdqu %xmm7,32(%rsi)
vmovdqu %xmm8,48(%rsi)
jne .L256_enc_msg_x4_loop1
addq $64,%rsi
addq $64,%rdi
.L256_enc_msg_x4_check_remainder:
cmpq $0,%r10
je .L256_enc_msg_x4_out
.L256_enc_msg_x4_loop2:
vmovdqa %xmm0,%xmm5
vpaddd one(%rip),%xmm0,%xmm0
vpxor (%rcx),%xmm5,%xmm5
vaesenc 16(%rcx),%xmm5,%xmm5
vaesenc 32(%rcx),%xmm5,%xmm5
vaesenc 48(%rcx),%xmm5,%xmm5
vaesenc 64(%rcx),%xmm5,%xmm5
vaesenc 80(%rcx),%xmm5,%xmm5
vaesenc 96(%rcx),%xmm5,%xmm5
vaesenc 112(%rcx),%xmm5,%xmm5
vaesenc 128(%rcx),%xmm5,%xmm5
vaesenc 144(%rcx),%xmm5,%xmm5
vaesenc 160(%rcx),%xmm5,%xmm5
vaesenc 176(%rcx),%xmm5,%xmm5
vaesenc 192(%rcx),%xmm5,%xmm5
vaesenc 208(%rcx),%xmm5,%xmm5
vaesenclast 224(%rcx),%xmm5,%xmm5
vpxor (%rdi),%xmm5,%xmm5
vmovdqu %xmm5,(%rsi)
addq $16,%rdi
addq $16,%rsi
subq $1,%r10
jne .L256_enc_msg_x4_loop2
.L256_enc_msg_x4_out:
.byte 0xf3,0xc3
.cfi_endproc
.size aes256gcmsiv_enc_msg_x4,.-aes256gcmsiv_enc_msg_x4
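// aes256gcmsiv_enc_msg_x8: wider variant of the routine above, keeping eight
// counter blocks in flight per 128-byte iteration (the eighth lives in an
// aligned stack slot), then falling back to single blocks for the tail.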
.globl aes256gcmsiv_enc_msg_x8
.hidden aes256gcmsiv_enc_msg_x8
.type aes256gcmsiv_enc_msg_x8,@function
.align 16
aes256gcmsiv_enc_msg_x8:
.cfi_startproc
_CET_ENDBR
testq %r8,%r8
jnz .L256_enc_msg_x8_start
.byte 0xf3,0xc3
.L256_enc_msg_x8_start:
movq %rsp,%r11
subq $16,%r11
andq $-64,%r11
movq %r8,%r10
shrq $4,%r8
shlq $60,%r10
jz .L256_enc_msg_x8_start2
addq $1,%r8
.L256_enc_msg_x8_start2:
movq %r8,%r10
shlq $61,%r10
shrq $61,%r10
vmovdqa (%rdx),%xmm1
vpor OR_MASK(%rip),%xmm1,%xmm1
vpaddd seven(%rip),%xmm1,%xmm0
vmovdqa %xmm0,(%r11)
vpaddd one(%rip),%xmm1,%xmm9
vpaddd two(%rip),%xmm1,%xmm10
vpaddd three(%rip),%xmm1,%xmm11
vpaddd four(%rip),%xmm1,%xmm12
vpaddd five(%rip),%xmm1,%xmm13
vpaddd six(%rip),%xmm1,%xmm14
vmovdqa %xmm1,%xmm0
shrq $3,%r8
jz .L256_enc_msg_x8_check_remainder
subq $128,%rsi
subq $128,%rdi
.L256_enc_msg_x8_loop1:
addq $128,%rsi
addq $128,%rdi
vmovdqa %xmm0,%xmm1
vmovdqa %xmm9,%xmm2
vmovdqa %xmm10,%xmm3
vmovdqa %xmm11,%xmm4
vmovdqa %xmm12,%xmm5
vmovdqa %xmm13,%xmm6
vmovdqa %xmm14,%xmm7
vmovdqa (%r11),%xmm8
vpxor (%rcx),%xmm1,%xmm1
vpxor (%rcx),%xmm2,%xmm2
vpxor (%rcx),%xmm3,%xmm3
vpxor (%rcx),%xmm4,%xmm4
vpxor (%rcx),%xmm5,%xmm5
vpxor (%rcx),%xmm6,%xmm6
vpxor (%rcx),%xmm7,%xmm7
vpxor (%rcx),%xmm8,%xmm8
vmovdqu 16(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqa (%r11),%xmm14
vpaddd eight(%rip),%xmm14,%xmm14
vmovdqa %xmm14,(%r11)
vmovdqu 32(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpsubd one(%rip),%xmm14,%xmm14
vmovdqu 48(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm0,%xmm0
vmovdqu 64(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm9,%xmm9
vmovdqu 80(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm10,%xmm10
vmovdqu 96(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm11,%xmm11
vmovdqu 112(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm12,%xmm12
vmovdqu 128(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vpaddd eight(%rip),%xmm13,%xmm13
vmovdqu 144(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqu 160(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqu 176(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqu 192(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqu 208(%rcx),%xmm15
vaesenc %xmm15,%xmm1,%xmm1
vaesenc %xmm15,%xmm2,%xmm2
vaesenc %xmm15,%xmm3,%xmm3
vaesenc %xmm15,%xmm4,%xmm4
vaesenc %xmm15,%xmm5,%xmm5
vaesenc %xmm15,%xmm6,%xmm6
vaesenc %xmm15,%xmm7,%xmm7
vaesenc %xmm15,%xmm8,%xmm8
vmovdqu 224(%rcx),%xmm15
vaesenclast %xmm15,%xmm1,%xmm1
vaesenclast %xmm15,%xmm2,%xmm2
vaesenclast %xmm15,%xmm3,%xmm3
vaesenclast %xmm15,%xmm4,%xmm4
vaesenclast %xmm15,%xmm5,%xmm5
vaesenclast %xmm15,%xmm6,%xmm6
vaesenclast %xmm15,%xmm7,%xmm7
vaesenclast %xmm15,%xmm8,%xmm8
vpxor 0(%rdi),%xmm1,%xmm1
vpxor 16(%rdi),%xmm2,%xmm2
vpxor 32(%rdi),%xmm3,%xmm3
vpxor 48(%rdi),%xmm4,%xmm4
vpxor 64(%rdi),%xmm5,%xmm5
vpxor 80(%rdi),%xmm6,%xmm6
vpxor 96(%rdi),%xmm7,%xmm7
vpxor 112(%rdi),%xmm8,%xmm8
subq $1,%r8
vmovdqu %xmm1,0(%rsi)
vmovdqu %xmm2,16(%rsi)
vmovdqu %xmm3,32(%rsi)
vmovdqu %xmm4,48(%rsi)
vmovdqu %xmm5,64(%rsi)
vmovdqu %xmm6,80(%rsi)
vmovdqu %xmm7,96(%rsi)
vmovdqu %xmm8,112(%rsi)
jne .L256_enc_msg_x8_loop1
addq $128,%rsi
addq $128,%rdi
.L256_enc_msg_x8_check_remainder:
cmpq $0,%r10
je .L256_enc_msg_x8_out
.L256_enc_msg_x8_loop2:
vmovdqa %xmm0,%xmm1
vpaddd one(%rip),%xmm0,%xmm0
vpxor (%rcx),%xmm1,%xmm1
vaesenc 16(%rcx),%xmm1,%xmm1
vaesenc 32(%rcx),%xmm1,%xmm1
vaesenc 48(%rcx),%xmm1,%xmm1
vaesenc 64(%rcx),%xmm1,%xmm1
vaesenc 80(%rcx),%xmm1,%xmm1
vaesenc 96(%rcx),%xmm1,%xmm1
vaesenc 112(%rcx),%xmm1,%xmm1
vaesenc 128(%rcx),%xmm1,%xmm1
vaesenc 144(%rcx),%xmm1,%xmm1
vaesenc 160(%rcx),%xmm1,%xmm1
vaesenc 176(%rcx),%xmm1,%xmm1
vaesenc 192(%rcx),%xmm1,%xmm1
vaesenc 208(%rcx),%xmm1,%xmm1
vaesenclast 224(%rcx),%xmm1,%xmm1
vpxor (%rdi),%xmm1,%xmm1
vmovdqu %xmm1,(%rsi)
addq $16,%rdi
addq $16,%rsi
subq $1,%r10
jnz .L256_enc_msg_x8_loop2
.L256_enc_msg_x8_out:
.byte 0xf3,0xc3
.cfi_endproc
.size aes256gcmsiv_enc_msg_x8,.-aes256gcmsiv_enc_msg_x8
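// aes256gcmsiv_dec: AES-256-GCM-SIV decryption; same structure as
// aes128gcmsiv_dec above, but with 14 AES rounds per counter block.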
.globl aes256gcmsiv_dec
.hidden aes256gcmsiv_dec
.type aes256gcmsiv_dec,@function
.align 16
aes256gcmsiv_dec:
.cfi_startproc
_CET_ENDBR
testq $~15,%r9
jnz .L256_dec_start
.byte 0xf3,0xc3
.L256_dec_start:
vzeroupper
vmovdqa (%rdx),%xmm0
vmovdqu 16(%rdx),%xmm15
vpor OR_MASK(%rip),%xmm15,%xmm15
movq %rdx,%rax
leaq 32(%rax),%rax
leaq 32(%rcx),%rcx
andq $~15,%r9
cmpq $96,%r9
jb .L256_dec_loop2
subq $96,%r9
vmovdqa %xmm15,%xmm7
vpaddd one(%rip),%xmm7,%xmm8
vpaddd two(%rip),%xmm7,%xmm9
vpaddd one(%rip),%xmm9,%xmm10
vpaddd two(%rip),%xmm9,%xmm11
vpaddd one(%rip),%xmm11,%xmm12
vpaddd two(%rip),%xmm11,%xmm15
vpxor (%r8),%xmm7,%xmm7
vpxor (%r8),%xmm8,%xmm8
vpxor (%r8),%xmm9,%xmm9
vpxor (%r8),%xmm10,%xmm10
vpxor (%r8),%xmm11,%xmm11
vpxor (%r8),%xmm12,%xmm12
vmovdqu 16(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 32(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 48(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 64(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 80(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 96(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 112(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 128(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 144(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 160(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 176(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 192(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 208(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 224(%r8),%xmm4
vaesenclast %xmm4,%xmm7,%xmm7
vaesenclast %xmm4,%xmm8,%xmm8
vaesenclast %xmm4,%xmm9,%xmm9
vaesenclast %xmm4,%xmm10,%xmm10
vaesenclast %xmm4,%xmm11,%xmm11
vaesenclast %xmm4,%xmm12,%xmm12
vpxor 0(%rdi),%xmm7,%xmm7
vpxor 16(%rdi),%xmm8,%xmm8
vpxor 32(%rdi),%xmm9,%xmm9
vpxor 48(%rdi),%xmm10,%xmm10
vpxor 64(%rdi),%xmm11,%xmm11
vpxor 80(%rdi),%xmm12,%xmm12
vmovdqu %xmm7,0(%rsi)
vmovdqu %xmm8,16(%rsi)
vmovdqu %xmm9,32(%rsi)
vmovdqu %xmm10,48(%rsi)
vmovdqu %xmm11,64(%rsi)
vmovdqu %xmm12,80(%rsi)
addq $96,%rdi
addq $96,%rsi
jmp .L256_dec_loop1
.align 64
.L256_dec_loop1:
cmpq $96,%r9
jb .L256_dec_finish_96
subq $96,%r9
vmovdqa %xmm12,%xmm6
vmovdqa %xmm11,16-32(%rax)
vmovdqa %xmm10,32-32(%rax)
vmovdqa %xmm9,48-32(%rax)
vmovdqa %xmm8,64-32(%rax)
vmovdqa %xmm7,80-32(%rax)
vmovdqa %xmm15,%xmm7
vpaddd one(%rip),%xmm7,%xmm8
vpaddd two(%rip),%xmm7,%xmm9
vpaddd one(%rip),%xmm9,%xmm10
vpaddd two(%rip),%xmm9,%xmm11
vpaddd one(%rip),%xmm11,%xmm12
vpaddd two(%rip),%xmm11,%xmm15
vmovdqa (%r8),%xmm4
vpxor %xmm4,%xmm7,%xmm7
vpxor %xmm4,%xmm8,%xmm8
vpxor %xmm4,%xmm9,%xmm9
vpxor %xmm4,%xmm10,%xmm10
vpxor %xmm4,%xmm11,%xmm11
vpxor %xmm4,%xmm12,%xmm12
vmovdqu 0-32(%rcx),%xmm4
vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3
vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1
vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 16(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu -16(%rax),%xmm6
vmovdqu -16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 32(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 0(%rax),%xmm6
vmovdqu 0(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 48(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 16(%rax),%xmm6
vmovdqu 16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 64(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 32(%rax),%xmm6
vmovdqu 32(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 80(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 96(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 112(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqa 80-32(%rax),%xmm6
vpxor %xmm0,%xmm6,%xmm6
vmovdqu 80-32(%rcx),%xmm5
vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 128(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vpsrldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm2,%xmm5
vpslldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm0
vmovdqa poly(%rip),%xmm3
vmovdqu 144(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 160(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 176(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 192(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 208(%r8),%xmm4
vaesenc %xmm4,%xmm7,%xmm7
vaesenc %xmm4,%xmm8,%xmm8
vaesenc %xmm4,%xmm9,%xmm9
vaesenc %xmm4,%xmm10,%xmm10
vaesenc %xmm4,%xmm11,%xmm11
vaesenc %xmm4,%xmm12,%xmm12
vmovdqu 224(%r8),%xmm6
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vpxor 0(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm7,%xmm7
vpxor 16(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm8,%xmm8
vpxor 32(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm9,%xmm9
vpxor 48(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm10,%xmm10
vpxor 64(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm11,%xmm11
vpxor 80(%rdi),%xmm6,%xmm4
vaesenclast %xmm4,%xmm12,%xmm12
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vmovdqu %xmm7,0(%rsi)
vmovdqu %xmm8,16(%rsi)
vmovdqu %xmm9,32(%rsi)
vmovdqu %xmm10,48(%rsi)
vmovdqu %xmm11,64(%rsi)
vmovdqu %xmm12,80(%rsi)
vpxor %xmm5,%xmm0,%xmm0
leaq 96(%rdi),%rdi
leaq 96(%rsi),%rsi
jmp .L256_dec_loop1
.L256_dec_finish_96:
vmovdqa %xmm12,%xmm6
vmovdqa %xmm11,16-32(%rax)
vmovdqa %xmm10,32-32(%rax)
vmovdqa %xmm9,48-32(%rax)
vmovdqa %xmm8,64-32(%rax)
vmovdqa %xmm7,80-32(%rax)
vmovdqu 0-32(%rcx),%xmm4
vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1
vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3
vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu -16(%rax),%xmm6
vmovdqu -16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 0(%rax),%xmm6
vmovdqu 0(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 16(%rax),%xmm6
vmovdqu 16(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 32(%rax),%xmm6
vmovdqu 32(%rcx),%xmm13
vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vmovdqu 80-32(%rax),%xmm6
vpxor %xmm0,%xmm6,%xmm6
vmovdqu 80-32(%rcx),%xmm5
vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm3,%xmm3
vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4
vpxor %xmm4,%xmm1,%xmm1
vpsrldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm2,%xmm5
vpslldq $8,%xmm1,%xmm4
vpxor %xmm4,%xmm3,%xmm0
vmovdqa poly(%rip),%xmm3
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vpalignr $8,%xmm0,%xmm0,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0
vpxor %xmm0,%xmm2,%xmm0
vpxor %xmm5,%xmm0,%xmm0
.L256_dec_loop2:
cmpq $16,%r9
jb .L256_dec_out
subq $16,%r9
vmovdqa %xmm15,%xmm2
vpaddd one(%rip),%xmm15,%xmm15
vpxor 0(%r8),%xmm2,%xmm2
vaesenc 16(%r8),%xmm2,%xmm2
vaesenc 32(%r8),%xmm2,%xmm2
vaesenc 48(%r8),%xmm2,%xmm2
vaesenc 64(%r8),%xmm2,%xmm2
vaesenc 80(%r8),%xmm2,%xmm2
vaesenc 96(%r8),%xmm2,%xmm2
vaesenc 112(%r8),%xmm2,%xmm2
vaesenc 128(%r8),%xmm2,%xmm2
vaesenc 144(%r8),%xmm2,%xmm2
vaesenc 160(%r8),%xmm2,%xmm2
vaesenc 176(%r8),%xmm2,%xmm2
vaesenc 192(%r8),%xmm2,%xmm2
vaesenc 208(%r8),%xmm2,%xmm2
vaesenclast 224(%r8),%xmm2,%xmm2
vpxor (%rdi),%xmm2,%xmm2
vmovdqu %xmm2,(%rsi)
addq $16,%rdi
addq $16,%rsi
vpxor %xmm2,%xmm0,%xmm0
vmovdqa -32(%rcx),%xmm1
call GFMUL
jmp .L256_dec_loop2
.L256_dec_out:
vmovdqu %xmm0,(%rdx)
.byte 0xf3,0xc3
.cfi_endproc
.size aes256gcmsiv_dec, .-aes256gcmsiv_dec
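// aes256gcmsiv_kdf: derive per-nonce key material by encrypting six
// consecutive counter blocks built from the nonce block at %rdi with the
// master AES-256 key schedule at %rdx, writing 96 bytes of output to %rsi.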
.globl aes256gcmsiv_kdf
.hidden aes256gcmsiv_kdf
.type aes256gcmsiv_kdf,@function
.align 16
aes256gcmsiv_kdf:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rdx),%xmm1
vmovdqa 0(%rdi),%xmm4
vmovdqa and_mask(%rip),%xmm11
vmovdqa one(%rip),%xmm8
vpshufd $0x90,%xmm4,%xmm4
vpand %xmm11,%xmm4,%xmm4
vpaddd %xmm8,%xmm4,%xmm6
vpaddd %xmm8,%xmm6,%xmm7
vpaddd %xmm8,%xmm7,%xmm11
vpaddd %xmm8,%xmm11,%xmm12
vpaddd %xmm8,%xmm12,%xmm13
vpxor %xmm1,%xmm4,%xmm4
vpxor %xmm1,%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm1,%xmm11,%xmm11
vpxor %xmm1,%xmm12,%xmm12
vpxor %xmm1,%xmm13,%xmm13
vmovdqa 16(%rdx),%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vaesenc %xmm1,%xmm6,%xmm6
vaesenc %xmm1,%xmm7,%xmm7
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovdqa 32(%rdx),%xmm2
vaesenc %xmm2,%xmm4,%xmm4
vaesenc %xmm2,%xmm6,%xmm6
vaesenc %xmm2,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vaesenc %xmm2,%xmm13,%xmm13
vmovdqa 48(%rdx),%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vaesenc %xmm1,%xmm6,%xmm6
vaesenc %xmm1,%xmm7,%xmm7
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovdqa 64(%rdx),%xmm2
vaesenc %xmm2,%xmm4,%xmm4
vaesenc %xmm2,%xmm6,%xmm6
vaesenc %xmm2,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vaesenc %xmm2,%xmm13,%xmm13
vmovdqa 80(%rdx),%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vaesenc %xmm1,%xmm6,%xmm6
vaesenc %xmm1,%xmm7,%xmm7
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovdqa 96(%rdx),%xmm2
vaesenc %xmm2,%xmm4,%xmm4
vaesenc %xmm2,%xmm6,%xmm6
vaesenc %xmm2,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vaesenc %xmm2,%xmm13,%xmm13
vmovdqa 112(%rdx),%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vaesenc %xmm1,%xmm6,%xmm6
vaesenc %xmm1,%xmm7,%xmm7
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovdqa 128(%rdx),%xmm2
vaesenc %xmm2,%xmm4,%xmm4
vaesenc %xmm2,%xmm6,%xmm6
vaesenc %xmm2,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vaesenc %xmm2,%xmm13,%xmm13
vmovdqa 144(%rdx),%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vaesenc %xmm1,%xmm6,%xmm6
vaesenc %xmm1,%xmm7,%xmm7
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovdqa 160(%rdx),%xmm2
vaesenc %xmm2,%xmm4,%xmm4
vaesenc %xmm2,%xmm6,%xmm6
vaesenc %xmm2,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vaesenc %xmm2,%xmm13,%xmm13
vmovdqa 176(%rdx),%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vaesenc %xmm1,%xmm6,%xmm6
vaesenc %xmm1,%xmm7,%xmm7
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovdqa 192(%rdx),%xmm2
vaesenc %xmm2,%xmm4,%xmm4
vaesenc %xmm2,%xmm6,%xmm6
vaesenc %xmm2,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vaesenc %xmm2,%xmm12,%xmm12
vaesenc %xmm2,%xmm13,%xmm13
vmovdqa 208(%rdx),%xmm1
vaesenc %xmm1,%xmm4,%xmm4
vaesenc %xmm1,%xmm6,%xmm6
vaesenc %xmm1,%xmm7,%xmm7
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovdqa 224(%rdx),%xmm2
vaesenclast %xmm2,%xmm4,%xmm4
vaesenclast %xmm2,%xmm6,%xmm6
vaesenclast %xmm2,%xmm7,%xmm7
vaesenclast %xmm2,%xmm11,%xmm11
vaesenclast %xmm2,%xmm12,%xmm12
vaesenclast %xmm2,%xmm13,%xmm13
vmovdqa %xmm4,0(%rsi)
vmovdqa %xmm6,16(%rsi)
vmovdqa %xmm7,32(%rsi)
vmovdqa %xmm11,48(%rsi)
vmovdqa %xmm12,64(%rsi)
vmovdqa %xmm13,80(%rsi)
.byte 0xf3,0xc3
.cfi_endproc
.size aes256gcmsiv_kdf, .-aes256gcmsiv_kdf
#endif
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86_64/crypto/cipher_extra/aesni-sha1-x86_64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.extern OPENSSL_ia32cap_P
.hidden OPENSSL_ia32cap_P
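// aesni_cbc_sha1_enc: dispatcher that selects the SHAEXT, AVX or SSSE3
// implementation from the OPENSSL_ia32cap_P capability bits.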
.globl aesni_cbc_sha1_enc
.hidden aesni_cbc_sha1_enc
.type aesni_cbc_sha1_enc,@function
.align 32
aesni_cbc_sha1_enc:
.cfi_startproc
movl OPENSSL_ia32cap_P+0(%rip),%r10d
movq OPENSSL_ia32cap_P+4(%rip),%r11
btq $61,%r11
jc aesni_cbc_sha1_enc_shaext
andl $268435456,%r11d
andl $1073741824,%r10d
orl %r11d,%r10d
cmpl $1342177280,%r10d
je aesni_cbc_sha1_enc_avx
jmp aesni_cbc_sha1_enc_ssse3
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
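// aesni_cbc_sha1_enc_ssse3: stitched AES-CBC encryption and SHA-1 hashing;
// SSSE3 SHA-1 message-schedule updates are interleaved with AES-NI rounds
// (the .byte 102,15,56,... sequences are hand-encoded aesenc/aesenclast/pshufb
// instructions) so both computations proceed over the same 64-byte chunks.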
.type aesni_cbc_sha1_enc_ssse3,@function
.align 32
aesni_cbc_sha1_enc_ssse3:
.cfi_startproc
movq 8(%rsp),%r10
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq -104(%rsp),%rsp
.cfi_adjust_cfa_offset 104
movq %rdi,%r12
movq %rsi,%r13
movq %rdx,%r14
leaq 112(%rcx),%r15
movdqu (%r8),%xmm2
movq %r8,88(%rsp)
shlq $6,%r14
subq %r12,%r13
movl 240-112(%r15),%r8d
addq %r10,%r14
leaq K_XX_XX(%rip),%r11
movl 0(%r9),%eax
movl 4(%r9),%ebx
movl 8(%r9),%ecx
movl 12(%r9),%edx
movl %ebx,%esi
movl 16(%r9),%ebp
movl %ecx,%edi
xorl %edx,%edi
andl %edi,%esi
movdqa 64(%r11),%xmm3
movdqa 0(%r11),%xmm13
movdqu 0(%r10),%xmm4
movdqu 16(%r10),%xmm5
movdqu 32(%r10),%xmm6
movdqu 48(%r10),%xmm7
.byte 102,15,56,0,227
.byte 102,15,56,0,235
.byte 102,15,56,0,243
addq $64,%r10
paddd %xmm13,%xmm4
.byte 102,15,56,0,251
paddd %xmm13,%xmm5
paddd %xmm13,%xmm6
movdqa %xmm4,0(%rsp)
psubd %xmm13,%xmm4
movdqa %xmm5,16(%rsp)
psubd %xmm13,%xmm5
movdqa %xmm6,32(%rsp)
psubd %xmm13,%xmm6
movups -112(%r15),%xmm15
movups 16-112(%r15),%xmm0
jmp .Loop_ssse3
.align 32
.Loop_ssse3:
rorl $2,%ebx
movups 0(%r12),%xmm14
xorps %xmm15,%xmm14
xorps %xmm14,%xmm2
movups -80(%r15),%xmm1
.byte 102,15,56,220,208
pshufd $238,%xmm4,%xmm8
xorl %edx,%esi
movdqa %xmm7,%xmm12
paddd %xmm7,%xmm13
movl %eax,%edi
addl 0(%rsp),%ebp
punpcklqdq %xmm5,%xmm8
xorl %ecx,%ebx
roll $5,%eax
addl %esi,%ebp
psrldq $4,%xmm12
andl %ebx,%edi
xorl %ecx,%ebx
pxor %xmm4,%xmm8
addl %eax,%ebp
rorl $7,%eax
pxor %xmm6,%xmm12
xorl %ecx,%edi
movl %ebp,%esi
addl 4(%rsp),%edx
pxor %xmm12,%xmm8
xorl %ebx,%eax
roll $5,%ebp
movdqa %xmm13,48(%rsp)
addl %edi,%edx
movups -64(%r15),%xmm0
.byte 102,15,56,220,209
andl %eax,%esi
movdqa %xmm8,%xmm3
xorl %ebx,%eax
addl %ebp,%edx
rorl $7,%ebp
movdqa %xmm8,%xmm12
xorl %ebx,%esi
pslldq $12,%xmm3
paddd %xmm8,%xmm8
movl %edx,%edi
addl 8(%rsp),%ecx
psrld $31,%xmm12
xorl %eax,%ebp
roll $5,%edx
addl %esi,%ecx
movdqa %xmm3,%xmm13
andl %ebp,%edi
xorl %eax,%ebp
psrld $30,%xmm3
addl %edx,%ecx
rorl $7,%edx
por %xmm12,%xmm8
xorl %eax,%edi
movl %ecx,%esi
addl 12(%rsp),%ebx
movups -48(%r15),%xmm1
.byte 102,15,56,220,208
pslld $2,%xmm13
pxor %xmm3,%xmm8
xorl %ebp,%edx
movdqa 0(%r11),%xmm3
roll $5,%ecx
addl %edi,%ebx
andl %edx,%esi
pxor %xmm13,%xmm8
xorl %ebp,%edx
addl %ecx,%ebx
rorl $7,%ecx
pshufd $238,%xmm5,%xmm9
xorl %ebp,%esi
movdqa %xmm8,%xmm13
paddd %xmm8,%xmm3
movl %ebx,%edi
addl 16(%rsp),%eax
punpcklqdq %xmm6,%xmm9
xorl %edx,%ecx
roll $5,%ebx
addl %esi,%eax
psrldq $4,%xmm13
andl %ecx,%edi
xorl %edx,%ecx
pxor %xmm5,%xmm9
addl %ebx,%eax
rorl $7,%ebx
movups -32(%r15),%xmm0
.byte 102,15,56,220,209
pxor %xmm7,%xmm13
xorl %edx,%edi
movl %eax,%esi
addl 20(%rsp),%ebp
pxor %xmm13,%xmm9
xorl %ecx,%ebx
roll $5,%eax
movdqa %xmm3,0(%rsp)
addl %edi,%ebp
andl %ebx,%esi
movdqa %xmm9,%xmm12
xorl %ecx,%ebx
addl %eax,%ebp
rorl $7,%eax
movdqa %xmm9,%xmm13
xorl %ecx,%esi
pslldq $12,%xmm12
paddd %xmm9,%xmm9
movl %ebp,%edi
addl 24(%rsp),%edx
psrld $31,%xmm13
xorl %ebx,%eax
roll $5,%ebp
addl %esi,%edx
movups -16(%r15),%xmm1
.byte 102,15,56,220,208
movdqa %xmm12,%xmm3
andl %eax,%edi
xorl %ebx,%eax
psrld $30,%xmm12
addl %ebp,%edx
rorl $7,%ebp
por %xmm13,%xmm9
xorl %ebx,%edi
movl %edx,%esi
addl 28(%rsp),%ecx
pslld $2,%xmm3
pxor %xmm12,%xmm9
xorl %eax,%ebp
movdqa 16(%r11),%xmm12
roll $5,%edx
addl %edi,%ecx
andl %ebp,%esi
pxor %xmm3,%xmm9
xorl %eax,%ebp
addl %edx,%ecx
rorl $7,%edx
pshufd $238,%xmm6,%xmm10
xorl %eax,%esi
movdqa %xmm9,%xmm3
paddd %xmm9,%xmm12
movl %ecx,%edi
addl 32(%rsp),%ebx
movups 0(%r15),%xmm0
.byte 102,15,56,220,209
punpcklqdq %xmm7,%xmm10
xorl %ebp,%edx
roll $5,%ecx
addl %esi,%ebx
psrldq $4,%xmm3
andl %edx,%edi
xorl %ebp,%edx
pxor %xmm6,%xmm10
addl %ecx,%ebx
rorl $7,%ecx
pxor %xmm8,%xmm3
xorl %ebp,%edi
movl %ebx,%esi
addl 36(%rsp),%eax
pxor %xmm3,%xmm10
xorl %edx,%ecx
roll $5,%ebx
movdqa %xmm12,16(%rsp)
addl %edi,%eax
andl %ecx,%esi
movdqa %xmm10,%xmm13
xorl %edx,%ecx
addl %ebx,%eax
rorl $7,%ebx
movups 16(%r15),%xmm1
.byte 102,15,56,220,208
movdqa %xmm10,%xmm3
xorl %edx,%esi
pslldq $12,%xmm13
paddd %xmm10,%xmm10
movl %eax,%edi
addl 40(%rsp),%ebp
psrld $31,%xmm3
xorl %ecx,%ebx
roll $5,%eax
addl %esi,%ebp
movdqa %xmm13,%xmm12
andl %ebx,%edi
xorl %ecx,%ebx
psrld $30,%xmm13
addl %eax,%ebp
rorl $7,%eax
por %xmm3,%xmm10
xorl %ecx,%edi
movl %ebp,%esi
addl 44(%rsp),%edx
pslld $2,%xmm12
pxor %xmm13,%xmm10
xorl %ebx,%eax
movdqa 16(%r11),%xmm13
roll $5,%ebp
addl %edi,%edx
movups 32(%r15),%xmm0
.byte 102,15,56,220,209
andl %eax,%esi
pxor %xmm12,%xmm10
xorl %ebx,%eax
addl %ebp,%edx
rorl $7,%ebp
pshufd $238,%xmm7,%xmm11
xorl %ebx,%esi
movdqa %xmm10,%xmm12
paddd %xmm10,%xmm13
movl %edx,%edi
addl 48(%rsp),%ecx
punpcklqdq %xmm8,%xmm11
xorl %eax,%ebp
roll $5,%edx
addl %esi,%ecx
psrldq $4,%xmm12
andl %ebp,%edi
xorl %eax,%ebp
pxor %xmm7,%xmm11
addl %edx,%ecx
rorl $7,%edx
pxor %xmm9,%xmm12
xorl %eax,%edi
movl %ecx,%esi
addl 52(%rsp),%ebx
movups 48(%r15),%xmm1
.byte 102,15,56,220,208
pxor %xmm12,%xmm11
xorl %ebp,%edx
roll $5,%ecx
movdqa %xmm13,32(%rsp)
addl %edi,%ebx
andl %edx,%esi
movdqa %xmm11,%xmm3
xorl %ebp,%edx
addl %ecx,%ebx
rorl $7,%ecx
movdqa %xmm11,%xmm12
xorl %ebp,%esi
pslldq $12,%xmm3
paddd %xmm11,%xmm11
movl %ebx,%edi
addl 56(%rsp),%eax
psrld $31,%xmm12
xorl %edx,%ecx
roll $5,%ebx
addl %esi,%eax
movdqa %xmm3,%xmm13
andl %ecx,%edi
xorl %edx,%ecx
psrld $30,%xmm3
addl %ebx,%eax
rorl $7,%ebx
cmpl $11,%r8d
jb .Laesenclast1
movups 64(%r15),%xmm0
.byte 102,15,56,220,209
movups 80(%r15),%xmm1
.byte 102,15,56,220,208
je .Laesenclast1
movups 96(%r15),%xmm0
.byte 102,15,56,220,209
movups 112(%r15),%xmm1
.byte 102,15,56,220,208
.Laesenclast1:
.byte 102,15,56,221,209
movups 16-112(%r15),%xmm0
por %xmm12,%xmm11
xorl %edx,%edi
movl %eax,%esi
addl 60(%rsp),%ebp
pslld $2,%xmm13
pxor %xmm3,%xmm11
xorl %ecx,%ebx
movdqa 16(%r11),%xmm3
roll $5,%eax
addl %edi,%ebp
andl %ebx,%esi
pxor %xmm13,%xmm11
pshufd $238,%xmm10,%xmm13
xorl %ecx,%ebx
addl %eax,%ebp
rorl $7,%eax
pxor %xmm8,%xmm4
xorl %ecx,%esi
movl %ebp,%edi
addl 0(%rsp),%edx
punpcklqdq %xmm11,%xmm13
xorl %ebx,%eax
roll $5,%ebp
pxor %xmm5,%xmm4
addl %esi,%edx
movups 16(%r12),%xmm14
xorps %xmm15,%xmm14
movups %xmm2,0(%r12,%r13,1)
xorps %xmm14,%xmm2
movups -80(%r15),%xmm1
.byte 102,15,56,220,208
andl %eax,%edi
movdqa %xmm3,%xmm12
xorl %ebx,%eax
paddd %xmm11,%xmm3
addl %ebp,%edx
pxor %xmm13,%xmm4
rorl $7,%ebp
xorl %ebx,%edi
movl %edx,%esi
addl 4(%rsp),%ecx
movdqa %xmm4,%xmm13
xorl %eax,%ebp
roll $5,%edx
movdqa %xmm3,48(%rsp)
addl %edi,%ecx
andl %ebp,%esi
xorl %eax,%ebp
pslld $2,%xmm4
addl %edx,%ecx
rorl $7,%edx
psrld $30,%xmm13
xorl %eax,%esi
movl %ecx,%edi
addl 8(%rsp),%ebx
movups -64(%r15),%xmm0
.byte 102,15,56,220,209
por %xmm13,%xmm4
xorl %ebp,%edx
roll $5,%ecx
pshufd $238,%xmm11,%xmm3
addl %esi,%ebx
andl %edx,%edi
xorl %ebp,%edx
addl %ecx,%ebx
addl 12(%rsp),%eax
xorl %ebp,%edi
movl %ebx,%esi
roll $5,%ebx
addl %edi,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
pxor %xmm9,%xmm5
addl 16(%rsp),%ebp
movups -48(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%esi
punpcklqdq %xmm4,%xmm3
movl %eax,%edi
roll $5,%eax
pxor %xmm6,%xmm5
addl %esi,%ebp
xorl %ecx,%edi
movdqa %xmm12,%xmm13
rorl $7,%ebx
paddd %xmm4,%xmm12
addl %eax,%ebp
pxor %xmm3,%xmm5
addl 20(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
movdqa %xmm5,%xmm3
addl %edi,%edx
xorl %ebx,%esi
movdqa %xmm12,0(%rsp)
rorl $7,%eax
addl %ebp,%edx
addl 24(%rsp),%ecx
pslld $2,%xmm5
xorl %eax,%esi
movl %edx,%edi
psrld $30,%xmm3
roll $5,%edx
addl %esi,%ecx
movups -32(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%edi
rorl $7,%ebp
por %xmm3,%xmm5
addl %edx,%ecx
addl 28(%rsp),%ebx
pshufd $238,%xmm4,%xmm12
xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
addl %edi,%ebx
xorl %ebp,%esi
rorl $7,%edx
addl %ecx,%ebx
pxor %xmm10,%xmm6
addl 32(%rsp),%eax
xorl %edx,%esi
punpcklqdq %xmm5,%xmm12
movl %ebx,%edi
roll $5,%ebx
pxor %xmm7,%xmm6
addl %esi,%eax
xorl %edx,%edi
movdqa 32(%r11),%xmm3
rorl $7,%ecx
paddd %xmm5,%xmm13
addl %ebx,%eax
pxor %xmm12,%xmm6
addl 36(%rsp),%ebp
movups -16(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
movdqa %xmm6,%xmm12
addl %edi,%ebp
xorl %ecx,%esi
movdqa %xmm13,16(%rsp)
rorl $7,%ebx
addl %eax,%ebp
addl 40(%rsp),%edx
pslld $2,%xmm6
xorl %ebx,%esi
movl %ebp,%edi
psrld $30,%xmm12
roll $5,%ebp
addl %esi,%edx
xorl %ebx,%edi
rorl $7,%eax
por %xmm12,%xmm6
addl %ebp,%edx
addl 44(%rsp),%ecx
pshufd $238,%xmm5,%xmm13
xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
addl %edi,%ecx
movups 0(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%esi
rorl $7,%ebp
addl %edx,%ecx
pxor %xmm11,%xmm7
addl 48(%rsp),%ebx
xorl %ebp,%esi
punpcklqdq %xmm6,%xmm13
movl %ecx,%edi
roll $5,%ecx
pxor %xmm8,%xmm7
addl %esi,%ebx
xorl %ebp,%edi
movdqa %xmm3,%xmm12
rorl $7,%edx
paddd %xmm6,%xmm3
addl %ecx,%ebx
pxor %xmm13,%xmm7
addl 52(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
movdqa %xmm7,%xmm13
addl %edi,%eax
xorl %edx,%esi
movdqa %xmm3,32(%rsp)
rorl $7,%ecx
addl %ebx,%eax
addl 56(%rsp),%ebp
movups 16(%r15),%xmm1
.byte 102,15,56,220,208
pslld $2,%xmm7
xorl %ecx,%esi
movl %eax,%edi
psrld $30,%xmm13
roll $5,%eax
addl %esi,%ebp
xorl %ecx,%edi
rorl $7,%ebx
por %xmm13,%xmm7
addl %eax,%ebp
addl 60(%rsp),%edx
pshufd $238,%xmm6,%xmm3
xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
addl %edi,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %ebp,%edx
pxor %xmm4,%xmm8
addl 0(%rsp),%ecx
xorl %eax,%esi
punpcklqdq %xmm7,%xmm3
movl %edx,%edi
roll $5,%edx
pxor %xmm9,%xmm8
addl %esi,%ecx
movups 32(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%edi
movdqa %xmm12,%xmm13
rorl $7,%ebp
paddd %xmm7,%xmm12
addl %edx,%ecx
pxor %xmm3,%xmm8
addl 4(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
movdqa %xmm8,%xmm3
addl %edi,%ebx
xorl %ebp,%esi
movdqa %xmm12,48(%rsp)
rorl $7,%edx
addl %ecx,%ebx
addl 8(%rsp),%eax
pslld $2,%xmm8
xorl %edx,%esi
movl %ebx,%edi
psrld $30,%xmm3
roll $5,%ebx
addl %esi,%eax
xorl %edx,%edi
rorl $7,%ecx
por %xmm3,%xmm8
addl %ebx,%eax
addl 12(%rsp),%ebp
movups 48(%r15),%xmm1
.byte 102,15,56,220,208
pshufd $238,%xmm7,%xmm12
xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
addl %edi,%ebp
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%ebp
pxor %xmm5,%xmm9
addl 16(%rsp),%edx
xorl %ebx,%esi
punpcklqdq %xmm8,%xmm12
movl %ebp,%edi
roll $5,%ebp
pxor %xmm10,%xmm9
addl %esi,%edx
xorl %ebx,%edi
movdqa %xmm13,%xmm3
rorl $7,%eax
paddd %xmm8,%xmm13
addl %ebp,%edx
pxor %xmm12,%xmm9
addl 20(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
movdqa %xmm9,%xmm12
addl %edi,%ecx
cmpl $11,%r8d
jb .Laesenclast2
movups 64(%r15),%xmm0
.byte 102,15,56,220,209
movups 80(%r15),%xmm1
.byte 102,15,56,220,208
je .Laesenclast2
movups 96(%r15),%xmm0
.byte 102,15,56,220,209
movups 112(%r15),%xmm1
.byte 102,15,56,220,208
.Laesenclast2:
.byte 102,15,56,221,209
movups 16-112(%r15),%xmm0
xorl %eax,%esi
movdqa %xmm13,0(%rsp)
rorl $7,%ebp
addl %edx,%ecx
addl 24(%rsp),%ebx
pslld $2,%xmm9
xorl %ebp,%esi
movl %ecx,%edi
psrld $30,%xmm12
roll $5,%ecx
addl %esi,%ebx
xorl %ebp,%edi
rorl $7,%edx
por %xmm12,%xmm9
addl %ecx,%ebx
addl 28(%rsp),%eax
pshufd $238,%xmm8,%xmm13
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%edi
roll $5,%ebx
addl %edi,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
pxor %xmm6,%xmm10
addl 32(%rsp),%ebp
movups 32(%r12),%xmm14
xorps %xmm15,%xmm14
movups %xmm2,16(%r13,%r12,1)
xorps %xmm14,%xmm2
movups -80(%r15),%xmm1
.byte 102,15,56,220,208
andl %ecx,%esi
xorl %edx,%ecx
rorl $7,%ebx
punpcklqdq %xmm9,%xmm13
movl %eax,%edi
xorl %ecx,%esi
pxor %xmm11,%xmm10
roll $5,%eax
addl %esi,%ebp
movdqa %xmm3,%xmm12
xorl %ebx,%edi
paddd %xmm9,%xmm3
xorl %ecx,%ebx
pxor %xmm13,%xmm10
addl %eax,%ebp
addl 36(%rsp),%edx
andl %ebx,%edi
xorl %ecx,%ebx
rorl $7,%eax
movdqa %xmm10,%xmm13
movl %ebp,%esi
xorl %ebx,%edi
movdqa %xmm3,16(%rsp)
roll $5,%ebp
addl %edi,%edx
movups -64(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%esi
pslld $2,%xmm10
xorl %ebx,%eax
addl %ebp,%edx
psrld $30,%xmm13
addl 40(%rsp),%ecx
andl %eax,%esi
xorl %ebx,%eax
por %xmm13,%xmm10
rorl $7,%ebp
movl %edx,%edi
xorl %eax,%esi
roll $5,%edx
pshufd $238,%xmm9,%xmm3
addl %esi,%ecx
xorl %ebp,%edi
xorl %eax,%ebp
addl %edx,%ecx
addl 44(%rsp),%ebx
andl %ebp,%edi
xorl %eax,%ebp
rorl $7,%edx
movups -48(%r15),%xmm1
.byte 102,15,56,220,208
movl %ecx,%esi
xorl %ebp,%edi
roll $5,%ecx
addl %edi,%ebx
xorl %edx,%esi
xorl %ebp,%edx
addl %ecx,%ebx
pxor %xmm7,%xmm11
addl 48(%rsp),%eax
andl %edx,%esi
xorl %ebp,%edx
rorl $7,%ecx
punpcklqdq %xmm10,%xmm3
movl %ebx,%edi
xorl %edx,%esi
pxor %xmm4,%xmm11
roll $5,%ebx
addl %esi,%eax
movdqa 48(%r11),%xmm13
xorl %ecx,%edi
paddd %xmm10,%xmm12
xorl %edx,%ecx
pxor %xmm3,%xmm11
addl %ebx,%eax
addl 52(%rsp),%ebp
movups -32(%r15),%xmm0
.byte 102,15,56,220,209
andl %ecx,%edi
xorl %edx,%ecx
rorl $7,%ebx
movdqa %xmm11,%xmm3
movl %eax,%esi
xorl %ecx,%edi
movdqa %xmm12,32(%rsp)
roll $5,%eax
addl %edi,%ebp
xorl %ebx,%esi
pslld $2,%xmm11
xorl %ecx,%ebx
addl %eax,%ebp
psrld $30,%xmm3
addl 56(%rsp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
por %xmm3,%xmm11
rorl $7,%eax
movl %ebp,%edi
xorl %ebx,%esi
roll $5,%ebp
pshufd $238,%xmm10,%xmm12
addl %esi,%edx
movups -16(%r15),%xmm1
.byte 102,15,56,220,208
xorl %eax,%edi
xorl %ebx,%eax
addl %ebp,%edx
addl 60(%rsp),%ecx
andl %eax,%edi
xorl %ebx,%eax
rorl $7,%ebp
movl %edx,%esi
xorl %eax,%edi
roll $5,%edx
addl %edi,%ecx
xorl %ebp,%esi
xorl %eax,%ebp
addl %edx,%ecx
pxor %xmm8,%xmm4
addl 0(%rsp),%ebx
andl %ebp,%esi
xorl %eax,%ebp
rorl $7,%edx
movups 0(%r15),%xmm0
.byte 102,15,56,220,209
punpcklqdq %xmm11,%xmm12
movl %ecx,%edi
xorl %ebp,%esi
pxor %xmm5,%xmm4
roll $5,%ecx
addl %esi,%ebx
movdqa %xmm13,%xmm3
xorl %edx,%edi
paddd %xmm11,%xmm13
xorl %ebp,%edx
pxor %xmm12,%xmm4
addl %ecx,%ebx
addl 4(%rsp),%eax
andl %edx,%edi
xorl %ebp,%edx
rorl $7,%ecx
movdqa %xmm4,%xmm12
movl %ebx,%esi
xorl %edx,%edi
movdqa %xmm13,48(%rsp)
roll $5,%ebx
addl %edi,%eax
xorl %ecx,%esi
pslld $2,%xmm4
xorl %edx,%ecx
addl %ebx,%eax
psrld $30,%xmm12
addl 8(%rsp),%ebp
movups 16(%r15),%xmm1
.byte 102,15,56,220,208
andl %ecx,%esi
xorl %edx,%ecx
por %xmm12,%xmm4
rorl $7,%ebx
movl %eax,%edi
xorl %ecx,%esi
roll $5,%eax
pshufd $238,%xmm11,%xmm13
addl %esi,%ebp
xorl %ebx,%edi
xorl %ecx,%ebx
addl %eax,%ebp
addl 12(%rsp),%edx
andl %ebx,%edi
xorl %ecx,%ebx
rorl $7,%eax
movl %ebp,%esi
xorl %ebx,%edi
roll $5,%ebp
addl %edi,%edx
movups 32(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%esi
xorl %ebx,%eax
addl %ebp,%edx
pxor %xmm9,%xmm5
addl 16(%rsp),%ecx
andl %eax,%esi
xorl %ebx,%eax
rorl $7,%ebp
punpcklqdq %xmm4,%xmm13
movl %edx,%edi
xorl %eax,%esi
pxor %xmm6,%xmm5
roll $5,%edx
addl %esi,%ecx
movdqa %xmm3,%xmm12
xorl %ebp,%edi
paddd %xmm4,%xmm3
xorl %eax,%ebp
pxor %xmm13,%xmm5
addl %edx,%ecx
addl 20(%rsp),%ebx
andl %ebp,%edi
xorl %eax,%ebp
rorl $7,%edx
movups 48(%r15),%xmm1
.byte 102,15,56,220,208
movdqa %xmm5,%xmm13
movl %ecx,%esi
xorl %ebp,%edi
movdqa %xmm3,0(%rsp)
roll $5,%ecx
addl %edi,%ebx
xorl %edx,%esi
pslld $2,%xmm5
xorl %ebp,%edx
addl %ecx,%ebx
psrld $30,%xmm13
addl 24(%rsp),%eax
andl %edx,%esi
xorl %ebp,%edx
por %xmm13,%xmm5
rorl $7,%ecx
movl %ebx,%edi
xorl %edx,%esi
roll $5,%ebx
pshufd $238,%xmm4,%xmm3
addl %esi,%eax
xorl %ecx,%edi
xorl %edx,%ecx
addl %ebx,%eax
addl 28(%rsp),%ebp
cmpl $11,%r8d
jb .Laesenclast3
movups 64(%r15),%xmm0
.byte 102,15,56,220,209
movups 80(%r15),%xmm1
.byte 102,15,56,220,208
je .Laesenclast3
movups 96(%r15),%xmm0
.byte 102,15,56,220,209
movups 112(%r15),%xmm1
.byte 102,15,56,220,208
.Laesenclast3:
.byte 102,15,56,221,209
movups 16-112(%r15),%xmm0
andl %ecx,%edi
xorl %edx,%ecx
rorl $7,%ebx
movl %eax,%esi
xorl %ecx,%edi
roll $5,%eax
addl %edi,%ebp
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%ebp
pxor %xmm10,%xmm6
addl 32(%rsp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
rorl $7,%eax
punpcklqdq %xmm5,%xmm3
movl %ebp,%edi
xorl %ebx,%esi
pxor %xmm7,%xmm6
roll $5,%ebp
addl %esi,%edx
movups 48(%r12),%xmm14
xorps %xmm15,%xmm14
movups %xmm2,32(%r13,%r12,1)
xorps %xmm14,%xmm2
movups -80(%r15),%xmm1
.byte 102,15,56,220,208
movdqa %xmm12,%xmm13
xorl %eax,%edi
paddd %xmm5,%xmm12
xorl %ebx,%eax
pxor %xmm3,%xmm6
addl %ebp,%edx
addl 36(%rsp),%ecx
andl %eax,%edi
xorl %ebx,%eax
rorl $7,%ebp
movdqa %xmm6,%xmm3
movl %edx,%esi
xorl %eax,%edi
movdqa %xmm12,16(%rsp)
roll $5,%edx
addl %edi,%ecx
xorl %ebp,%esi
pslld $2,%xmm6
xorl %eax,%ebp
addl %edx,%ecx
psrld $30,%xmm3
addl 40(%rsp),%ebx
andl %ebp,%esi
xorl %eax,%ebp
por %xmm3,%xmm6
rorl $7,%edx
movups -64(%r15),%xmm0
.byte 102,15,56,220,209
movl %ecx,%edi
xorl %ebp,%esi
roll $5,%ecx
pshufd $238,%xmm5,%xmm12
addl %esi,%ebx
xorl %edx,%edi
xorl %ebp,%edx
addl %ecx,%ebx
addl 44(%rsp),%eax
andl %edx,%edi
xorl %ebp,%edx
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%edi
roll $5,%ebx
addl %edi,%eax
xorl %edx,%esi
addl %ebx,%eax
pxor %xmm11,%xmm7
addl 48(%rsp),%ebp
movups -48(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%esi
punpcklqdq %xmm6,%xmm12
movl %eax,%edi
roll $5,%eax
pxor %xmm8,%xmm7
addl %esi,%ebp
xorl %ecx,%edi
movdqa %xmm13,%xmm3
rorl $7,%ebx
paddd %xmm6,%xmm13
addl %eax,%ebp
pxor %xmm12,%xmm7
addl 52(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
movdqa %xmm7,%xmm12
addl %edi,%edx
xorl %ebx,%esi
movdqa %xmm13,32(%rsp)
rorl $7,%eax
addl %ebp,%edx
addl 56(%rsp),%ecx
pslld $2,%xmm7
xorl %eax,%esi
movl %edx,%edi
psrld $30,%xmm12
roll $5,%edx
addl %esi,%ecx
movups -32(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%edi
rorl $7,%ebp
por %xmm12,%xmm7
addl %edx,%ecx
addl 60(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
addl %edi,%ebx
xorl %ebp,%esi
rorl $7,%edx
addl %ecx,%ebx
addl 0(%rsp),%eax
xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
paddd %xmm7,%xmm3
addl %esi,%eax
xorl %edx,%edi
movdqa %xmm3,48(%rsp)
rorl $7,%ecx
addl %ebx,%eax
addl 4(%rsp),%ebp
movups -16(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
addl %edi,%ebp
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%ebp
addl 8(%rsp),%edx
xorl %ebx,%esi
movl %ebp,%edi
roll $5,%ebp
addl %esi,%edx
xorl %ebx,%edi
rorl $7,%eax
addl %ebp,%edx
addl 12(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
addl %edi,%ecx
movups 0(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%esi
rorl $7,%ebp
addl %edx,%ecx
cmpq %r14,%r10
je .Ldone_ssse3
movdqa 64(%r11),%xmm3
movdqa 0(%r11),%xmm13
movdqu 0(%r10),%xmm4
movdqu 16(%r10),%xmm5
movdqu 32(%r10),%xmm6
movdqu 48(%r10),%xmm7
.byte 102,15,56,0,227
addq $64,%r10
addl 16(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
.byte 102,15,56,0,235
roll $5,%ecx
addl %esi,%ebx
xorl %ebp,%edi
rorl $7,%edx
paddd %xmm13,%xmm4
addl %ecx,%ebx
addl 20(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
movdqa %xmm4,0(%rsp)
roll $5,%ebx
addl %edi,%eax
xorl %edx,%esi
rorl $7,%ecx
psubd %xmm13,%xmm4
addl %ebx,%eax
addl 24(%rsp),%ebp
movups 16(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%esi
movl %eax,%edi
roll $5,%eax
addl %esi,%ebp
xorl %ecx,%edi
rorl $7,%ebx
addl %eax,%ebp
addl 28(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
addl %edi,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %ebp,%edx
addl 32(%rsp),%ecx
xorl %eax,%esi
movl %edx,%edi
.byte 102,15,56,0,243
roll $5,%edx
addl %esi,%ecx
movups 32(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%edi
rorl $7,%ebp
paddd %xmm13,%xmm5
addl %edx,%ecx
addl 36(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
movdqa %xmm5,16(%rsp)
roll $5,%ecx
addl %edi,%ebx
xorl %ebp,%esi
rorl $7,%edx
psubd %xmm13,%xmm5
addl %ecx,%ebx
addl 40(%rsp),%eax
xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
addl %esi,%eax
xorl %edx,%edi
rorl $7,%ecx
addl %ebx,%eax
addl 44(%rsp),%ebp
movups 48(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
addl %edi,%ebp
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%ebp
addl 48(%rsp),%edx
xorl %ebx,%esi
movl %ebp,%edi
.byte 102,15,56,0,251
roll $5,%ebp
addl %esi,%edx
xorl %ebx,%edi
rorl $7,%eax
paddd %xmm13,%xmm6
addl %ebp,%edx
addl 52(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
movdqa %xmm6,32(%rsp)
roll $5,%edx
addl %edi,%ecx
cmpl $11,%r8d
jb .Laesenclast4
movups 64(%r15),%xmm0
.byte 102,15,56,220,209
movups 80(%r15),%xmm1
.byte 102,15,56,220,208
je .Laesenclast4
movups 96(%r15),%xmm0
.byte 102,15,56,220,209
movups 112(%r15),%xmm1
.byte 102,15,56,220,208
.Laesenclast4:
.byte 102,15,56,221,209
movups 16-112(%r15),%xmm0
xorl %eax,%esi
rorl $7,%ebp
psubd %xmm13,%xmm6
addl %edx,%ecx
addl 56(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
addl %esi,%ebx
xorl %ebp,%edi
rorl $7,%edx
addl %ecx,%ebx
addl 60(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
addl %edi,%eax
rorl $7,%ecx
addl %ebx,%eax
movups %xmm2,48(%r13,%r12,1)
leaq 64(%r12),%r12
addl 0(%r9),%eax
addl 4(%r9),%esi
addl 8(%r9),%ecx
addl 12(%r9),%edx
movl %eax,0(%r9)
addl 16(%r9),%ebp
movl %esi,4(%r9)
movl %esi,%ebx
movl %ecx,8(%r9)
movl %ecx,%edi
movl %edx,12(%r9)
xorl %edx,%edi
movl %ebp,16(%r9)
andl %edi,%esi
jmp .Loop_ssse3
.Ldone_ssse3:
addl 16(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
addl %esi,%ebx
xorl %ebp,%edi
rorl $7,%edx
addl %ecx,%ebx
addl 20(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
addl %edi,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
addl 24(%rsp),%ebp
movups 16(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%esi
movl %eax,%edi
roll $5,%eax
addl %esi,%ebp
xorl %ecx,%edi
rorl $7,%ebx
addl %eax,%ebp
addl 28(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
addl %edi,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %ebp,%edx
addl 32(%rsp),%ecx
xorl %eax,%esi
movl %edx,%edi
roll $5,%edx
addl %esi,%ecx
movups 32(%r15),%xmm0
.byte 102,15,56,220,209
xorl %eax,%edi
rorl $7,%ebp
addl %edx,%ecx
addl 36(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
addl %edi,%ebx
xorl %ebp,%esi
rorl $7,%edx
addl %ecx,%ebx
addl 40(%rsp),%eax
xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
addl %esi,%eax
xorl %edx,%edi
rorl $7,%ecx
addl %ebx,%eax
addl 44(%rsp),%ebp
movups 48(%r15),%xmm1
.byte 102,15,56,220,208
xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
addl %edi,%ebp
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%ebp
addl 48(%rsp),%edx
xorl %ebx,%esi
movl %ebp,%edi
roll $5,%ebp
addl %esi,%edx
xorl %ebx,%edi
rorl $7,%eax
addl %ebp,%edx
addl 52(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
addl %edi,%ecx
cmpl $11,%r8d
jb .Laesenclast5
movups 64(%r15),%xmm0
.byte 102,15,56,220,209
movups 80(%r15),%xmm1
.byte 102,15,56,220,208
je .Laesenclast5
movups 96(%r15),%xmm0
.byte 102,15,56,220,209
movups 112(%r15),%xmm1
.byte 102,15,56,220,208
.Laesenclast5:
.byte 102,15,56,221,209
movups 16-112(%r15),%xmm0
xorl %eax,%esi
rorl $7,%ebp
addl %edx,%ecx
addl 56(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
addl %esi,%ebx
xorl %ebp,%edi
rorl $7,%edx
addl %ecx,%ebx
addl 60(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
addl %edi,%eax
rorl $7,%ecx
addl %ebx,%eax
movups %xmm2,48(%r13,%r12,1)
movq 88(%rsp),%r8
addl 0(%r9),%eax
addl 4(%r9),%esi
addl 8(%r9),%ecx
movl %eax,0(%r9)
addl 12(%r9),%edx
movl %esi,4(%r9)
addl 16(%r9),%ebp
movl %ecx,8(%r9)
movl %edx,12(%r9)
movl %ebp,16(%r9)
movups %xmm2,(%r8)
leaq 104(%rsp),%rsi
.cfi_def_cfa %rsi,56
movq 0(%rsi),%r15
.cfi_restore %r15
movq 8(%rsi),%r14
.cfi_restore %r14
movq 16(%rsi),%r13
.cfi_restore %r13
movq 24(%rsi),%r12
.cfi_restore %r12
movq 32(%rsi),%rbp
.cfi_restore %rbp
movq 40(%rsi),%rbx
.cfi_restore %rbx
leaq 48(%rsi),%rsp
.cfi_def_cfa %rsp,8
.Lepilogue_ssse3:
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
.type aesni_cbc_sha1_enc_avx,@function
.align 32
aesni_cbc_sha1_enc_avx:
.cfi_startproc
movq 8(%rsp),%r10
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq -104(%rsp),%rsp
.cfi_adjust_cfa_offset 104
vzeroall
movq %rdi,%r12
movq %rsi,%r13
movq %rdx,%r14
leaq 112(%rcx),%r15
vmovdqu (%r8),%xmm12
movq %r8,88(%rsp)
shlq $6,%r14
subq %r12,%r13
movl 240-112(%r15),%r8d
addq %r10,%r14
leaq K_XX_XX(%rip),%r11
movl 0(%r9),%eax
movl 4(%r9),%ebx
movl 8(%r9),%ecx
movl 12(%r9),%edx
movl %ebx,%esi
movl 16(%r9),%ebp
movl %ecx,%edi
xorl %edx,%edi
andl %edi,%esi
vmovdqa 64(%r11),%xmm6
vmovdqa 0(%r11),%xmm10
vmovdqu 0(%r10),%xmm0
vmovdqu 16(%r10),%xmm1
vmovdqu 32(%r10),%xmm2
vmovdqu 48(%r10),%xmm3
vpshufb %xmm6,%xmm0,%xmm0
addq $64,%r10
vpshufb %xmm6,%xmm1,%xmm1
vpshufb %xmm6,%xmm2,%xmm2
vpshufb %xmm6,%xmm3,%xmm3
vpaddd %xmm10,%xmm0,%xmm4
vpaddd %xmm10,%xmm1,%xmm5
vpaddd %xmm10,%xmm2,%xmm6
vmovdqa %xmm4,0(%rsp)
vmovdqa %xmm5,16(%rsp)
vmovdqa %xmm6,32(%rsp)
vmovups -112(%r15),%xmm15
vmovups 16-112(%r15),%xmm14
jmp .Loop_avx
.align 32
.Loop_avx:
shrdl $2,%ebx,%ebx
vmovdqu 0(%r12),%xmm13
vpxor %xmm15,%xmm13,%xmm13
vpxor %xmm13,%xmm12,%xmm12
vaesenc %xmm14,%xmm12,%xmm12
vmovups -80(%r15),%xmm15
xorl %edx,%esi
vpalignr $8,%xmm0,%xmm1,%xmm4
movl %eax,%edi
addl 0(%rsp),%ebp
vpaddd %xmm3,%xmm10,%xmm9
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpsrldq $4,%xmm3,%xmm8
addl %esi,%ebp
andl %ebx,%edi
vpxor %xmm0,%xmm4,%xmm4
xorl %ecx,%ebx
addl %eax,%ebp
vpxor %xmm2,%xmm8,%xmm8
shrdl $7,%eax,%eax
xorl %ecx,%edi
movl %ebp,%esi
addl 4(%rsp),%edx
vpxor %xmm8,%xmm4,%xmm4
xorl %ebx,%eax
shldl $5,%ebp,%ebp
vmovdqa %xmm9,48(%rsp)
addl %edi,%edx
vaesenc %xmm15,%xmm12,%xmm12
vmovups -64(%r15),%xmm14
andl %eax,%esi
vpsrld $31,%xmm4,%xmm8
xorl %ebx,%eax
addl %ebp,%edx
shrdl $7,%ebp,%ebp
xorl %ebx,%esi
vpslldq $12,%xmm4,%xmm9
vpaddd %xmm4,%xmm4,%xmm4
movl %edx,%edi
addl 8(%rsp),%ecx
xorl %eax,%ebp
shldl $5,%edx,%edx
vpor %xmm8,%xmm4,%xmm4
vpsrld $30,%xmm9,%xmm8
addl %esi,%ecx
andl %ebp,%edi
xorl %eax,%ebp
addl %edx,%ecx
vpslld $2,%xmm9,%xmm9
vpxor %xmm8,%xmm4,%xmm4
shrdl $7,%edx,%edx
xorl %eax,%edi
movl %ecx,%esi
addl 12(%rsp),%ebx
vaesenc %xmm14,%xmm12,%xmm12
vmovups -48(%r15),%xmm15
vpxor %xmm9,%xmm4,%xmm4
xorl %ebp,%edx
shldl $5,%ecx,%ecx
addl %edi,%ebx
andl %edx,%esi
xorl %ebp,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %ebp,%esi
vpalignr $8,%xmm1,%xmm2,%xmm5
movl %ebx,%edi
addl 16(%rsp),%eax
vpaddd %xmm4,%xmm10,%xmm9
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpsrldq $4,%xmm4,%xmm8
addl %esi,%eax
andl %ecx,%edi
vpxor %xmm1,%xmm5,%xmm5
xorl %edx,%ecx
addl %ebx,%eax
vpxor %xmm3,%xmm8,%xmm8
shrdl $7,%ebx,%ebx
vaesenc %xmm15,%xmm12,%xmm12
vmovups -32(%r15),%xmm14
xorl %edx,%edi
movl %eax,%esi
addl 20(%rsp),%ebp
vpxor %xmm8,%xmm5,%xmm5
xorl %ecx,%ebx
shldl $5,%eax,%eax
vmovdqa %xmm9,0(%rsp)
addl %edi,%ebp
andl %ebx,%esi
vpsrld $31,%xmm5,%xmm8
xorl %ecx,%ebx
addl %eax,%ebp
shrdl $7,%eax,%eax
xorl %ecx,%esi
vpslldq $12,%xmm5,%xmm9
vpaddd %xmm5,%xmm5,%xmm5
movl %ebp,%edi
addl 24(%rsp),%edx
xorl %ebx,%eax
shldl $5,%ebp,%ebp
vpor %xmm8,%xmm5,%xmm5
vpsrld $30,%xmm9,%xmm8
addl %esi,%edx
vaesenc %xmm14,%xmm12,%xmm12
vmovups -16(%r15),%xmm15
andl %eax,%edi
xorl %ebx,%eax
addl %ebp,%edx
vpslld $2,%xmm9,%xmm9
vpxor %xmm8,%xmm5,%xmm5
shrdl $7,%ebp,%ebp
xorl %ebx,%edi
movl %edx,%esi
addl 28(%rsp),%ecx
vpxor %xmm9,%xmm5,%xmm5
xorl %eax,%ebp
shldl $5,%edx,%edx
vmovdqa 16(%r11),%xmm10
addl %edi,%ecx
andl %ebp,%esi
xorl %eax,%ebp
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
vpalignr $8,%xmm2,%xmm3,%xmm6
movl %ecx,%edi
addl 32(%rsp),%ebx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 0(%r15),%xmm14
vpaddd %xmm5,%xmm10,%xmm9
xorl %ebp,%edx
shldl $5,%ecx,%ecx
vpsrldq $4,%xmm5,%xmm8
addl %esi,%ebx
andl %edx,%edi
vpxor %xmm2,%xmm6,%xmm6
xorl %ebp,%edx
addl %ecx,%ebx
vpxor %xmm4,%xmm8,%xmm8
shrdl $7,%ecx,%ecx
xorl %ebp,%edi
movl %ebx,%esi
addl 36(%rsp),%eax
vpxor %xmm8,%xmm6,%xmm6
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vmovdqa %xmm9,16(%rsp)
addl %edi,%eax
andl %ecx,%esi
vpsrld $31,%xmm6,%xmm8
xorl %edx,%ecx
addl %ebx,%eax
shrdl $7,%ebx,%ebx
vaesenc %xmm14,%xmm12,%xmm12
vmovups 16(%r15),%xmm15
xorl %edx,%esi
vpslldq $12,%xmm6,%xmm9
vpaddd %xmm6,%xmm6,%xmm6
movl %eax,%edi
addl 40(%rsp),%ebp
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpor %xmm8,%xmm6,%xmm6
vpsrld $30,%xmm9,%xmm8
addl %esi,%ebp
andl %ebx,%edi
xorl %ecx,%ebx
addl %eax,%ebp
vpslld $2,%xmm9,%xmm9
vpxor %xmm8,%xmm6,%xmm6
shrdl $7,%eax,%eax
xorl %ecx,%edi
movl %ebp,%esi
addl 44(%rsp),%edx
vpxor %xmm9,%xmm6,%xmm6
xorl %ebx,%eax
shldl $5,%ebp,%ebp
addl %edi,%edx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 32(%r15),%xmm14
andl %eax,%esi
xorl %ebx,%eax
addl %ebp,%edx
shrdl $7,%ebp,%ebp
xorl %ebx,%esi
vpalignr $8,%xmm3,%xmm4,%xmm7
movl %edx,%edi
addl 48(%rsp),%ecx
vpaddd %xmm6,%xmm10,%xmm9
xorl %eax,%ebp
shldl $5,%edx,%edx
vpsrldq $4,%xmm6,%xmm8
addl %esi,%ecx
andl %ebp,%edi
vpxor %xmm3,%xmm7,%xmm7
xorl %eax,%ebp
addl %edx,%ecx
vpxor %xmm5,%xmm8,%xmm8
shrdl $7,%edx,%edx
xorl %eax,%edi
movl %ecx,%esi
addl 52(%rsp),%ebx
vaesenc %xmm14,%xmm12,%xmm12
vmovups 48(%r15),%xmm15
vpxor %xmm8,%xmm7,%xmm7
xorl %ebp,%edx
shldl $5,%ecx,%ecx
vmovdqa %xmm9,32(%rsp)
addl %edi,%ebx
andl %edx,%esi
vpsrld $31,%xmm7,%xmm8
xorl %ebp,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %ebp,%esi
vpslldq $12,%xmm7,%xmm9
vpaddd %xmm7,%xmm7,%xmm7
movl %ebx,%edi
addl 56(%rsp),%eax
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpor %xmm8,%xmm7,%xmm7
vpsrld $30,%xmm9,%xmm8
addl %esi,%eax
andl %ecx,%edi
xorl %edx,%ecx
addl %ebx,%eax
vpslld $2,%xmm9,%xmm9
vpxor %xmm8,%xmm7,%xmm7
shrdl $7,%ebx,%ebx
cmpl $11,%r8d
jb .Lvaesenclast6
vaesenc %xmm15,%xmm12,%xmm12
vmovups 64(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 80(%r15),%xmm15
je .Lvaesenclast6
vaesenc %xmm15,%xmm12,%xmm12
vmovups 96(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 112(%r15),%xmm15
.Lvaesenclast6:
vaesenclast %xmm15,%xmm12,%xmm12
vmovups -112(%r15),%xmm15
vmovups 16-112(%r15),%xmm14
xorl %edx,%edi
movl %eax,%esi
addl 60(%rsp),%ebp
vpxor %xmm9,%xmm7,%xmm7
xorl %ecx,%ebx
shldl $5,%eax,%eax
addl %edi,%ebp
andl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%ebp
vpalignr $8,%xmm6,%xmm7,%xmm8
vpxor %xmm4,%xmm0,%xmm0
shrdl $7,%eax,%eax
xorl %ecx,%esi
movl %ebp,%edi
addl 0(%rsp),%edx
vpxor %xmm1,%xmm0,%xmm0
xorl %ebx,%eax
shldl $5,%ebp,%ebp
vpaddd %xmm7,%xmm10,%xmm9
addl %esi,%edx
vmovdqu 16(%r12),%xmm13
vpxor %xmm15,%xmm13,%xmm13
vmovups %xmm12,0(%r12,%r13,1)
vpxor %xmm13,%xmm12,%xmm12
vaesenc %xmm14,%xmm12,%xmm12
vmovups -80(%r15),%xmm15
andl %eax,%edi
vpxor %xmm8,%xmm0,%xmm0
xorl %ebx,%eax
addl %ebp,%edx
shrdl $7,%ebp,%ebp
xorl %ebx,%edi
vpsrld $30,%xmm0,%xmm8
vmovdqa %xmm9,48(%rsp)
movl %edx,%esi
addl 4(%rsp),%ecx
xorl %eax,%ebp
shldl $5,%edx,%edx
vpslld $2,%xmm0,%xmm0
addl %edi,%ecx
andl %ebp,%esi
xorl %eax,%ebp
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
movl %ecx,%edi
addl 8(%rsp),%ebx
vaesenc %xmm15,%xmm12,%xmm12
vmovups -64(%r15),%xmm14
vpor %xmm8,%xmm0,%xmm0
xorl %ebp,%edx
shldl $5,%ecx,%ecx
addl %esi,%ebx
andl %edx,%edi
xorl %ebp,%edx
addl %ecx,%ebx
addl 12(%rsp),%eax
xorl %ebp,%edi
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %edi,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpalignr $8,%xmm7,%xmm0,%xmm8
vpxor %xmm5,%xmm1,%xmm1
addl 16(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups -48(%r15),%xmm15
xorl %ecx,%esi
movl %eax,%edi
shldl $5,%eax,%eax
vpxor %xmm2,%xmm1,%xmm1
addl %esi,%ebp
xorl %ecx,%edi
vpaddd %xmm0,%xmm10,%xmm9
shrdl $7,%ebx,%ebx
addl %eax,%ebp
vpxor %xmm8,%xmm1,%xmm1
addl 20(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
shldl $5,%ebp,%ebp
vpsrld $30,%xmm1,%xmm8
vmovdqa %xmm9,0(%rsp)
addl %edi,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %ebp,%edx
vpslld $2,%xmm1,%xmm1
addl 24(%rsp),%ecx
xorl %eax,%esi
movl %edx,%edi
shldl $5,%edx,%edx
addl %esi,%ecx
vaesenc %xmm15,%xmm12,%xmm12
vmovups -32(%r15),%xmm14
xorl %eax,%edi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
vpor %xmm8,%xmm1,%xmm1
addl 28(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %edi,%ebx
xorl %ebp,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpalignr $8,%xmm0,%xmm1,%xmm8
vpxor %xmm6,%xmm2,%xmm2
addl 32(%rsp),%eax
xorl %edx,%esi
movl %ebx,%edi
shldl $5,%ebx,%ebx
vpxor %xmm3,%xmm2,%xmm2
addl %esi,%eax
xorl %edx,%edi
vpaddd %xmm1,%xmm10,%xmm9
vmovdqa 32(%r11),%xmm10
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpxor %xmm8,%xmm2,%xmm2
addl 36(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups -16(%r15),%xmm15
xorl %ecx,%edi
movl %eax,%esi
shldl $5,%eax,%eax
vpsrld $30,%xmm2,%xmm8
vmovdqa %xmm9,16(%rsp)
addl %edi,%ebp
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
vpslld $2,%xmm2,%xmm2
addl 40(%rsp),%edx
xorl %ebx,%esi
movl %ebp,%edi
shldl $5,%ebp,%ebp
addl %esi,%edx
xorl %ebx,%edi
shrdl $7,%eax,%eax
addl %ebp,%edx
vpor %xmm8,%xmm2,%xmm2
addl 44(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
shldl $5,%edx,%edx
addl %edi,%ecx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 0(%r15),%xmm14
xorl %eax,%esi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
vpalignr $8,%xmm1,%xmm2,%xmm8
vpxor %xmm7,%xmm3,%xmm3
addl 48(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
shldl $5,%ecx,%ecx
vpxor %xmm4,%xmm3,%xmm3
addl %esi,%ebx
xorl %ebp,%edi
vpaddd %xmm2,%xmm10,%xmm9
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpxor %xmm8,%xmm3,%xmm3
addl 52(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
shldl $5,%ebx,%ebx
vpsrld $30,%xmm3,%xmm8
vmovdqa %xmm9,32(%rsp)
addl %edi,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpslld $2,%xmm3,%xmm3
addl 56(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups 16(%r15),%xmm15
xorl %ecx,%esi
movl %eax,%edi
shldl $5,%eax,%eax
addl %esi,%ebp
xorl %ecx,%edi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
vpor %xmm8,%xmm3,%xmm3
addl 60(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
shldl $5,%ebp,%ebp
addl %edi,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %ebp,%edx
vpalignr $8,%xmm2,%xmm3,%xmm8
vpxor %xmm0,%xmm4,%xmm4
addl 0(%rsp),%ecx
xorl %eax,%esi
movl %edx,%edi
shldl $5,%edx,%edx
vpxor %xmm5,%xmm4,%xmm4
addl %esi,%ecx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 32(%r15),%xmm14
xorl %eax,%edi
vpaddd %xmm3,%xmm10,%xmm9
shrdl $7,%ebp,%ebp
addl %edx,%ecx
vpxor %xmm8,%xmm4,%xmm4
addl 4(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
shldl $5,%ecx,%ecx
vpsrld $30,%xmm4,%xmm8
vmovdqa %xmm9,48(%rsp)
addl %edi,%ebx
xorl %ebp,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpslld $2,%xmm4,%xmm4
addl 8(%rsp),%eax
xorl %edx,%esi
movl %ebx,%edi
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%edi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpor %xmm8,%xmm4,%xmm4
addl 12(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups 48(%r15),%xmm15
xorl %ecx,%edi
movl %eax,%esi
shldl $5,%eax,%eax
addl %edi,%ebp
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
vpalignr $8,%xmm3,%xmm4,%xmm8
vpxor %xmm1,%xmm5,%xmm5
addl 16(%rsp),%edx
xorl %ebx,%esi
movl %ebp,%edi
shldl $5,%ebp,%ebp
vpxor %xmm6,%xmm5,%xmm5
addl %esi,%edx
xorl %ebx,%edi
vpaddd %xmm4,%xmm10,%xmm9
shrdl $7,%eax,%eax
addl %ebp,%edx
vpxor %xmm8,%xmm5,%xmm5
addl 20(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
shldl $5,%edx,%edx
vpsrld $30,%xmm5,%xmm8
vmovdqa %xmm9,0(%rsp)
addl %edi,%ecx
cmpl $11,%r8d
jb .Lvaesenclast7
vaesenc %xmm15,%xmm12,%xmm12
vmovups 64(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 80(%r15),%xmm15
je .Lvaesenclast7
vaesenc %xmm15,%xmm12,%xmm12
vmovups 96(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 112(%r15),%xmm15
.Lvaesenclast7:
vaesenclast %xmm15,%xmm12,%xmm12
vmovups -112(%r15),%xmm15
vmovups 16-112(%r15),%xmm14
xorl %eax,%esi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
vpslld $2,%xmm5,%xmm5
addl 24(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %ebp,%edi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpor %xmm8,%xmm5,%xmm5
addl 28(%rsp),%eax
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%edi
shldl $5,%ebx,%ebx
addl %edi,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
vpalignr $8,%xmm4,%xmm5,%xmm8
vpxor %xmm2,%xmm6,%xmm6
addl 32(%rsp),%ebp
vmovdqu 32(%r12),%xmm13
vpxor %xmm15,%xmm13,%xmm13
vmovups %xmm12,16(%r13,%r12,1)
vpxor %xmm13,%xmm12,%xmm12
vaesenc %xmm14,%xmm12,%xmm12
vmovups -80(%r15),%xmm15
andl %ecx,%esi
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
vpxor %xmm7,%xmm6,%xmm6
movl %eax,%edi
xorl %ecx,%esi
vpaddd %xmm5,%xmm10,%xmm9
shldl $5,%eax,%eax
addl %esi,%ebp
vpxor %xmm8,%xmm6,%xmm6
xorl %ebx,%edi
xorl %ecx,%ebx
addl %eax,%ebp
addl 36(%rsp),%edx
vpsrld $30,%xmm6,%xmm8
vmovdqa %xmm9,16(%rsp)
andl %ebx,%edi
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %ebp,%esi
vpslld $2,%xmm6,%xmm6
xorl %ebx,%edi
shldl $5,%ebp,%ebp
addl %edi,%edx
vaesenc %xmm15,%xmm12,%xmm12
vmovups -64(%r15),%xmm14
xorl %eax,%esi
xorl %ebx,%eax
addl %ebp,%edx
addl 40(%rsp),%ecx
andl %eax,%esi
vpor %xmm8,%xmm6,%xmm6
xorl %ebx,%eax
shrdl $7,%ebp,%ebp
movl %edx,%edi
xorl %eax,%esi
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %ebp,%edi
xorl %eax,%ebp
addl %edx,%ecx
addl 44(%rsp),%ebx
andl %ebp,%edi
xorl %eax,%ebp
shrdl $7,%edx,%edx
vaesenc %xmm14,%xmm12,%xmm12
vmovups -48(%r15),%xmm15
movl %ecx,%esi
xorl %ebp,%edi
shldl $5,%ecx,%ecx
addl %edi,%ebx
xorl %edx,%esi
xorl %ebp,%edx
addl %ecx,%ebx
vpalignr $8,%xmm5,%xmm6,%xmm8
vpxor %xmm3,%xmm7,%xmm7
addl 48(%rsp),%eax
andl %edx,%esi
xorl %ebp,%edx
shrdl $7,%ecx,%ecx
vpxor %xmm0,%xmm7,%xmm7
movl %ebx,%edi
xorl %edx,%esi
vpaddd %xmm6,%xmm10,%xmm9
vmovdqa 48(%r11),%xmm10
shldl $5,%ebx,%ebx
addl %esi,%eax
vpxor %xmm8,%xmm7,%xmm7
xorl %ecx,%edi
xorl %edx,%ecx
addl %ebx,%eax
addl 52(%rsp),%ebp
vaesenc %xmm15,%xmm12,%xmm12
vmovups -32(%r15),%xmm14
vpsrld $30,%xmm7,%xmm8
vmovdqa %xmm9,32(%rsp)
andl %ecx,%edi
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
vpslld $2,%xmm7,%xmm7
xorl %ecx,%edi
shldl $5,%eax,%eax
addl %edi,%ebp
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%ebp
addl 56(%rsp),%edx
andl %ebx,%esi
vpor %xmm8,%xmm7,%xmm7
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %ebp,%edi
xorl %ebx,%esi
shldl $5,%ebp,%ebp
addl %esi,%edx
vaesenc %xmm14,%xmm12,%xmm12
vmovups -16(%r15),%xmm15
xorl %eax,%edi
xorl %ebx,%eax
addl %ebp,%edx
addl 60(%rsp),%ecx
andl %eax,%edi
xorl %ebx,%eax
shrdl $7,%ebp,%ebp
movl %edx,%esi
xorl %eax,%edi
shldl $5,%edx,%edx
addl %edi,%ecx
xorl %ebp,%esi
xorl %eax,%ebp
addl %edx,%ecx
vpalignr $8,%xmm6,%xmm7,%xmm8
vpxor %xmm4,%xmm0,%xmm0
addl 0(%rsp),%ebx
andl %ebp,%esi
xorl %eax,%ebp
shrdl $7,%edx,%edx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 0(%r15),%xmm14
vpxor %xmm1,%xmm0,%xmm0
movl %ecx,%edi
xorl %ebp,%esi
vpaddd %xmm7,%xmm10,%xmm9
shldl $5,%ecx,%ecx
addl %esi,%ebx
vpxor %xmm8,%xmm0,%xmm0
xorl %edx,%edi
xorl %ebp,%edx
addl %ecx,%ebx
addl 4(%rsp),%eax
vpsrld $30,%xmm0,%xmm8
vmovdqa %xmm9,48(%rsp)
andl %edx,%edi
xorl %ebp,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
vpslld $2,%xmm0,%xmm0
xorl %edx,%edi
shldl $5,%ebx,%ebx
addl %edi,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
addl 8(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups 16(%r15),%xmm15
andl %ecx,%esi
vpor %xmm8,%xmm0,%xmm0
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%edi
xorl %ecx,%esi
shldl $5,%eax,%eax
addl %esi,%ebp
xorl %ebx,%edi
xorl %ecx,%ebx
addl %eax,%ebp
addl 12(%rsp),%edx
andl %ebx,%edi
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %ebp,%esi
xorl %ebx,%edi
shldl $5,%ebp,%ebp
addl %edi,%edx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 32(%r15),%xmm14
xorl %eax,%esi
xorl %ebx,%eax
addl %ebp,%edx
vpalignr $8,%xmm7,%xmm0,%xmm8
vpxor %xmm5,%xmm1,%xmm1
addl 16(%rsp),%ecx
andl %eax,%esi
xorl %ebx,%eax
shrdl $7,%ebp,%ebp
vpxor %xmm2,%xmm1,%xmm1
movl %edx,%edi
xorl %eax,%esi
vpaddd %xmm0,%xmm10,%xmm9
shldl $5,%edx,%edx
addl %esi,%ecx
vpxor %xmm8,%xmm1,%xmm1
xorl %ebp,%edi
xorl %eax,%ebp
addl %edx,%ecx
addl 20(%rsp),%ebx
vpsrld $30,%xmm1,%xmm8
vmovdqa %xmm9,0(%rsp)
andl %ebp,%edi
xorl %eax,%ebp
shrdl $7,%edx,%edx
vaesenc %xmm14,%xmm12,%xmm12
vmovups 48(%r15),%xmm15
movl %ecx,%esi
vpslld $2,%xmm1,%xmm1
xorl %ebp,%edi
shldl $5,%ecx,%ecx
addl %edi,%ebx
xorl %edx,%esi
xorl %ebp,%edx
addl %ecx,%ebx
addl 24(%rsp),%eax
andl %edx,%esi
vpor %xmm8,%xmm1,%xmm1
xorl %ebp,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%edi
xorl %edx,%esi
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %ecx,%edi
xorl %edx,%ecx
addl %ebx,%eax
addl 28(%rsp),%ebp
cmpl $11,%r8d
jb .Lvaesenclast8
vaesenc %xmm15,%xmm12,%xmm12
vmovups 64(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 80(%r15),%xmm15
je .Lvaesenclast8
vaesenc %xmm15,%xmm12,%xmm12
vmovups 96(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 112(%r15),%xmm15
.Lvaesenclast8:
vaesenclast %xmm15,%xmm12,%xmm12
vmovups -112(%r15),%xmm15
vmovups 16-112(%r15),%xmm14
andl %ecx,%edi
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
xorl %ecx,%edi
shldl $5,%eax,%eax
addl %edi,%ebp
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%ebp
vpalignr $8,%xmm0,%xmm1,%xmm8
vpxor %xmm6,%xmm2,%xmm2
addl 32(%rsp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
shrdl $7,%eax,%eax
vpxor %xmm3,%xmm2,%xmm2
movl %ebp,%edi
xorl %ebx,%esi
vpaddd %xmm1,%xmm10,%xmm9
shldl $5,%ebp,%ebp
addl %esi,%edx
vmovdqu 48(%r12),%xmm13
vpxor %xmm15,%xmm13,%xmm13
vmovups %xmm12,32(%r13,%r12,1)
vpxor %xmm13,%xmm12,%xmm12
vaesenc %xmm14,%xmm12,%xmm12
vmovups -80(%r15),%xmm15
vpxor %xmm8,%xmm2,%xmm2
xorl %eax,%edi
xorl %ebx,%eax
addl %ebp,%edx
addl 36(%rsp),%ecx
vpsrld $30,%xmm2,%xmm8
vmovdqa %xmm9,16(%rsp)
andl %eax,%edi
xorl %ebx,%eax
shrdl $7,%ebp,%ebp
movl %edx,%esi
vpslld $2,%xmm2,%xmm2
xorl %eax,%edi
shldl $5,%edx,%edx
addl %edi,%ecx
xorl %ebp,%esi
xorl %eax,%ebp
addl %edx,%ecx
addl 40(%rsp),%ebx
andl %ebp,%esi
vpor %xmm8,%xmm2,%xmm2
xorl %eax,%ebp
shrdl $7,%edx,%edx
vaesenc %xmm15,%xmm12,%xmm12
vmovups -64(%r15),%xmm14
movl %ecx,%edi
xorl %ebp,%esi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edx,%edi
xorl %ebp,%edx
addl %ecx,%ebx
addl 44(%rsp),%eax
andl %edx,%edi
xorl %ebp,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%edi
shldl $5,%ebx,%ebx
addl %edi,%eax
xorl %edx,%esi
addl %ebx,%eax
vpalignr $8,%xmm1,%xmm2,%xmm8
vpxor %xmm7,%xmm3,%xmm3
addl 48(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups -48(%r15),%xmm15
xorl %ecx,%esi
movl %eax,%edi
shldl $5,%eax,%eax
vpxor %xmm4,%xmm3,%xmm3
addl %esi,%ebp
xorl %ecx,%edi
vpaddd %xmm2,%xmm10,%xmm9
shrdl $7,%ebx,%ebx
addl %eax,%ebp
vpxor %xmm8,%xmm3,%xmm3
addl 52(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
shldl $5,%ebp,%ebp
vpsrld $30,%xmm3,%xmm8
vmovdqa %xmm9,32(%rsp)
addl %edi,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %ebp,%edx
vpslld $2,%xmm3,%xmm3
addl 56(%rsp),%ecx
xorl %eax,%esi
movl %edx,%edi
shldl $5,%edx,%edx
addl %esi,%ecx
vaesenc %xmm15,%xmm12,%xmm12
vmovups -32(%r15),%xmm14
xorl %eax,%edi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
vpor %xmm8,%xmm3,%xmm3
addl 60(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %edi,%ebx
xorl %ebp,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 0(%rsp),%eax
vpaddd %xmm3,%xmm10,%xmm9
xorl %edx,%esi
movl %ebx,%edi
shldl $5,%ebx,%ebx
addl %esi,%eax
vmovdqa %xmm9,48(%rsp)
xorl %edx,%edi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 4(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups -16(%r15),%xmm15
xorl %ecx,%edi
movl %eax,%esi
shldl $5,%eax,%eax
addl %edi,%ebp
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
addl 8(%rsp),%edx
xorl %ebx,%esi
movl %ebp,%edi
shldl $5,%ebp,%ebp
addl %esi,%edx
xorl %ebx,%edi
shrdl $7,%eax,%eax
addl %ebp,%edx
addl 12(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
shldl $5,%edx,%edx
addl %edi,%ecx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 0(%r15),%xmm14
xorl %eax,%esi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
cmpq %r14,%r10
je .Ldone_avx
vmovdqa 64(%r11),%xmm9
vmovdqa 0(%r11),%xmm10
vmovdqu 0(%r10),%xmm0
vmovdqu 16(%r10),%xmm1
vmovdqu 32(%r10),%xmm2
vmovdqu 48(%r10),%xmm3
vpshufb %xmm9,%xmm0,%xmm0
addq $64,%r10
addl 16(%rsp),%ebx
xorl %ebp,%esi
vpshufb %xmm9,%xmm1,%xmm1
movl %ecx,%edi
shldl $5,%ecx,%ecx
vpaddd %xmm10,%xmm0,%xmm8
addl %esi,%ebx
xorl %ebp,%edi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vmovdqa %xmm8,0(%rsp)
addl 20(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %edi,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups 16(%r15),%xmm15
xorl %ecx,%esi
movl %eax,%edi
shldl $5,%eax,%eax
addl %esi,%ebp
xorl %ecx,%edi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
addl 28(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
shldl $5,%ebp,%ebp
addl %edi,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %ebp,%edx
addl 32(%rsp),%ecx
xorl %eax,%esi
vpshufb %xmm9,%xmm2,%xmm2
movl %edx,%edi
shldl $5,%edx,%edx
vpaddd %xmm10,%xmm1,%xmm8
addl %esi,%ecx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 32(%r15),%xmm14
xorl %eax,%edi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
vmovdqa %xmm8,16(%rsp)
addl 36(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %edi,%ebx
xorl %ebp,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%rsp),%eax
xorl %edx,%esi
movl %ebx,%edi
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%edi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups 48(%r15),%xmm15
xorl %ecx,%edi
movl %eax,%esi
shldl $5,%eax,%eax
addl %edi,%ebp
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
addl 48(%rsp),%edx
xorl %ebx,%esi
vpshufb %xmm9,%xmm3,%xmm3
movl %ebp,%edi
shldl $5,%ebp,%ebp
vpaddd %xmm10,%xmm2,%xmm8
addl %esi,%edx
xorl %ebx,%edi
shrdl $7,%eax,%eax
addl %ebp,%edx
vmovdqa %xmm8,32(%rsp)
addl 52(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
shldl $5,%edx,%edx
addl %edi,%ecx
cmpl $11,%r8d
jb .Lvaesenclast9
vaesenc %xmm15,%xmm12,%xmm12
vmovups 64(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 80(%r15),%xmm15
je .Lvaesenclast9
vaesenc %xmm15,%xmm12,%xmm12
vmovups 96(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 112(%r15),%xmm15
.Lvaesenclast9:
vaesenclast %xmm15,%xmm12,%xmm12
vmovups -112(%r15),%xmm15
vmovups 16-112(%r15),%xmm14
xorl %eax,%esi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
addl 56(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %ebp,%edi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %edi,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vmovups %xmm12,48(%r13,%r12,1)
leaq 64(%r12),%r12
addl 0(%r9),%eax
addl 4(%r9),%esi
addl 8(%r9),%ecx
addl 12(%r9),%edx
movl %eax,0(%r9)
addl 16(%r9),%ebp
movl %esi,4(%r9)
movl %esi,%ebx
movl %ecx,8(%r9)
movl %ecx,%edi
movl %edx,12(%r9)
xorl %edx,%edi
movl %ebp,16(%r9)
andl %edi,%esi
jmp .Loop_avx
.Ldone_avx:
addl 16(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %ebp,%edi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 20(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %edi,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups 16(%r15),%xmm15
xorl %ecx,%esi
movl %eax,%edi
shldl $5,%eax,%eax
addl %esi,%ebp
xorl %ecx,%edi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
addl 28(%rsp),%edx
xorl %ebx,%edi
movl %ebp,%esi
shldl $5,%ebp,%ebp
addl %edi,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %ebp,%edx
addl 32(%rsp),%ecx
xorl %eax,%esi
movl %edx,%edi
shldl $5,%edx,%edx
addl %esi,%ecx
vaesenc %xmm15,%xmm12,%xmm12
vmovups 32(%r15),%xmm14
xorl %eax,%edi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
addl 36(%rsp),%ebx
xorl %ebp,%edi
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %edi,%ebx
xorl %ebp,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%rsp),%eax
xorl %edx,%esi
movl %ebx,%edi
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%edi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%rsp),%ebp
vaesenc %xmm14,%xmm12,%xmm12
vmovups 48(%r15),%xmm15
xorl %ecx,%edi
movl %eax,%esi
shldl $5,%eax,%eax
addl %edi,%ebp
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%ebp
addl 48(%rsp),%edx
xorl %ebx,%esi
movl %ebp,%edi
shldl $5,%ebp,%ebp
addl %esi,%edx
xorl %ebx,%edi
shrdl $7,%eax,%eax
addl %ebp,%edx
addl 52(%rsp),%ecx
xorl %eax,%edi
movl %edx,%esi
shldl $5,%edx,%edx
addl %edi,%ecx
cmpl $11,%r8d
jb .Lvaesenclast10
vaesenc %xmm15,%xmm12,%xmm12
vmovups 64(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 80(%r15),%xmm15
je .Lvaesenclast10
vaesenc %xmm15,%xmm12,%xmm12
vmovups 96(%r15),%xmm14
vaesenc %xmm14,%xmm12,%xmm12
vmovups 112(%r15),%xmm15
.Lvaesenclast10:
vaesenclast %xmm15,%xmm12,%xmm12
vmovups -112(%r15),%xmm15
vmovups 16-112(%r15),%xmm14
xorl %eax,%esi
shrdl $7,%ebp,%ebp
addl %edx,%ecx
addl 56(%rsp),%ebx
xorl %ebp,%esi
movl %ecx,%edi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %ebp,%edi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%rsp),%eax
xorl %edx,%edi
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %edi,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vmovups %xmm12,48(%r13,%r12,1)
movq 88(%rsp),%r8
addl 0(%r9),%eax
addl 4(%r9),%esi
addl 8(%r9),%ecx
movl %eax,0(%r9)
addl 12(%r9),%edx
movl %esi,4(%r9)
addl 16(%r9),%ebp
movl %ecx,8(%r9)
movl %edx,12(%r9)
movl %ebp,16(%r9)
vmovups %xmm12,(%r8)
vzeroall
leaq 104(%rsp),%rsi
.cfi_def_cfa %rsi,56
movq 0(%rsi),%r15
.cfi_restore %r15
movq 8(%rsi),%r14
.cfi_restore %r14
movq 16(%rsi),%r13
.cfi_restore %r13
movq 24(%rsi),%r12
.cfi_restore %r12
movq 32(%rsi),%rbp
.cfi_restore %rbp
movq 40(%rsi),%rbx
.cfi_restore %rbx
leaq 48(%rsi),%rsp
.cfi_def_cfa %rsp,8
.Lepilogue_avx:
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
.section .rodata
.align 64
K_XX_XX:
.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
.byte 65,69,83,78,73,45,67,66,67,43,83,72,65,49,32,115,116,105,116,99,104,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.align 64
.type aesni_cbc_sha1_enc_shaext,@function
.align 32
aesni_cbc_sha1_enc_shaext:
.cfi_startproc
movq 8(%rsp),%r10
movdqu (%r9),%xmm8
movd 16(%r9),%xmm9
movdqa K_XX_XX+80(%rip),%xmm7
movl 240(%rcx),%r11d
subq %rdi,%rsi
movups (%rcx),%xmm15
movups (%r8),%xmm2
movups 16(%rcx),%xmm0
leaq 112(%rcx),%rcx
pshufd $27,%xmm8,%xmm8
pshufd $27,%xmm9,%xmm9
jmp .Loop_shaext
.align 16
.Loop_shaext:
movups 0(%rdi),%xmm14
xorps %xmm15,%xmm14
xorps %xmm14,%xmm2
movups -80(%rcx),%xmm1
.byte 102,15,56,220,208
movdqu (%r10),%xmm3
movdqa %xmm9,%xmm12
.byte 102,15,56,0,223
movdqu 16(%r10),%xmm4
movdqa %xmm8,%xmm11
movups -64(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,0,231
paddd %xmm3,%xmm9
movdqu 32(%r10),%xmm5
leaq 64(%r10),%r10
pxor %xmm12,%xmm3
movups -48(%rcx),%xmm1
.byte 102,15,56,220,208
pxor %xmm12,%xmm3
movdqa %xmm8,%xmm10
.byte 102,15,56,0,239
.byte 69,15,58,204,193,0
.byte 68,15,56,200,212
movups -32(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 15,56,201,220
movdqu -16(%r10),%xmm6
movdqa %xmm8,%xmm9
.byte 102,15,56,0,247
movups -16(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 69,15,58,204,194,0
.byte 68,15,56,200,205
pxor %xmm5,%xmm3
.byte 15,56,201,229
movups 0(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,0
.byte 68,15,56,200,214
movups 16(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,222
pxor %xmm6,%xmm4
.byte 15,56,201,238
movups 32(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,0
.byte 68,15,56,200,203
movups 48(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,227
pxor %xmm3,%xmm5
.byte 15,56,201,243
cmpl $11,%r11d
jb .Laesenclast11
movups 64(%rcx),%xmm0
.byte 102,15,56,220,209
movups 80(%rcx),%xmm1
.byte 102,15,56,220,208
je .Laesenclast11
movups 96(%rcx),%xmm0
.byte 102,15,56,220,209
movups 112(%rcx),%xmm1
.byte 102,15,56,220,208
.Laesenclast11:
.byte 102,15,56,221,209
movups 16-112(%rcx),%xmm0
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,0
.byte 68,15,56,200,212
movups 16(%rdi),%xmm14
xorps %xmm15,%xmm14
movups %xmm2,0(%rsi,%rdi,1)
xorps %xmm14,%xmm2
movups -80(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,236
pxor %xmm4,%xmm6
.byte 15,56,201,220
movups -64(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,1
.byte 68,15,56,200,205
movups -48(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,245
pxor %xmm5,%xmm3
.byte 15,56,201,229
movups -32(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,1
.byte 68,15,56,200,214
movups -16(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,222
pxor %xmm6,%xmm4
.byte 15,56,201,238
movups 0(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,1
.byte 68,15,56,200,203
movups 16(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,227
pxor %xmm3,%xmm5
.byte 15,56,201,243
movups 32(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,1
.byte 68,15,56,200,212
movups 48(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,236
pxor %xmm4,%xmm6
.byte 15,56,201,220
cmpl $11,%r11d
jb .Laesenclast12
movups 64(%rcx),%xmm0
.byte 102,15,56,220,209
movups 80(%rcx),%xmm1
.byte 102,15,56,220,208
je .Laesenclast12
movups 96(%rcx),%xmm0
.byte 102,15,56,220,209
movups 112(%rcx),%xmm1
.byte 102,15,56,220,208
.Laesenclast12:
.byte 102,15,56,221,209
movups 16-112(%rcx),%xmm0
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,1
.byte 68,15,56,200,205
movups 32(%rdi),%xmm14
xorps %xmm15,%xmm14
movups %xmm2,16(%rsi,%rdi,1)
xorps %xmm14,%xmm2
movups -80(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,245
pxor %xmm5,%xmm3
.byte 15,56,201,229
movups -64(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,2
.byte 68,15,56,200,214
movups -48(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,222
pxor %xmm6,%xmm4
.byte 15,56,201,238
movups -32(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,2
.byte 68,15,56,200,203
movups -16(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,227
pxor %xmm3,%xmm5
.byte 15,56,201,243
movups 0(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,2
.byte 68,15,56,200,212
movups 16(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,236
pxor %xmm4,%xmm6
.byte 15,56,201,220
movups 32(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,2
.byte 68,15,56,200,205
movups 48(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,245
pxor %xmm5,%xmm3
.byte 15,56,201,229
cmpl $11,%r11d
jb .Laesenclast13
movups 64(%rcx),%xmm0
.byte 102,15,56,220,209
movups 80(%rcx),%xmm1
.byte 102,15,56,220,208
je .Laesenclast13
movups 96(%rcx),%xmm0
.byte 102,15,56,220,209
movups 112(%rcx),%xmm1
.byte 102,15,56,220,208
.Laesenclast13:
.byte 102,15,56,221,209
movups 16-112(%rcx),%xmm0
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,2
.byte 68,15,56,200,214
movups 48(%rdi),%xmm14
xorps %xmm15,%xmm14
movups %xmm2,32(%rsi,%rdi,1)
xorps %xmm14,%xmm2
movups -80(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,222
pxor %xmm6,%xmm4
.byte 15,56,201,238
movups -64(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,3
.byte 68,15,56,200,203
movups -48(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 15,56,202,227
pxor %xmm3,%xmm5
.byte 15,56,201,243
movups -32(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,3
.byte 68,15,56,200,212
.byte 15,56,202,236
pxor %xmm4,%xmm6
movups -16(%rcx),%xmm1
.byte 102,15,56,220,208
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,3
.byte 68,15,56,200,205
.byte 15,56,202,245
movups 0(%rcx),%xmm0
.byte 102,15,56,220,209
movdqa %xmm12,%xmm5
movdqa %xmm8,%xmm10
.byte 69,15,58,204,193,3
.byte 68,15,56,200,214
movups 16(%rcx),%xmm1
.byte 102,15,56,220,208
movdqa %xmm8,%xmm9
.byte 69,15,58,204,194,3
.byte 68,15,56,200,205
movups 32(%rcx),%xmm0
.byte 102,15,56,220,209
movups 48(%rcx),%xmm1
.byte 102,15,56,220,208
cmpl $11,%r11d
jb .Laesenclast14
movups 64(%rcx),%xmm0
.byte 102,15,56,220,209
movups 80(%rcx),%xmm1
.byte 102,15,56,220,208
je .Laesenclast14
movups 96(%rcx),%xmm0
.byte 102,15,56,220,209
movups 112(%rcx),%xmm1
.byte 102,15,56,220,208
.Laesenclast14:
.byte 102,15,56,221,209
movups 16-112(%rcx),%xmm0
decq %rdx
paddd %xmm11,%xmm8
movups %xmm2,48(%rsi,%rdi,1)
leaq 64(%rdi),%rdi
jnz .Loop_shaext
pshufd $27,%xmm8,%xmm8
pshufd $27,%xmm9,%xmm9
movups %xmm2,(%r8)
movdqu %xmm8,(%r9)
movd %xmm9,16(%r9)
.byte 0xf3,0xc3
.cfi_endproc
.size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext
#endif
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/test/trampoline-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
.globl _abi_test_trampoline
.private_extern _abi_test_trampoline
.align 4
_abi_test_trampoline:
L_abi_test_trampoline_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 24(%esp),%ecx
movl (%ecx),%esi
movl 4(%ecx),%edi
movl 8(%ecx),%ebx
movl 12(%ecx),%ebp
subl $44,%esp
movl 72(%esp),%eax
xorl %ecx,%ecx
L000loop:
cmpl 76(%esp),%ecx
jae L001loop_done
movl (%eax,%ecx,4),%edx
movl %edx,(%esp,%ecx,4)
addl $1,%ecx
jmp L000loop
L001loop_done:
call *64(%esp)
addl $44,%esp
movl 24(%esp),%ecx
movl %esi,(%ecx)
movl %edi,4(%ecx)
movl %ebx,8(%ecx)
movl %ebp,12(%ecx)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _abi_test_get_and_clear_direction_flag
.private_extern _abi_test_get_and_clear_direction_flag
.align 4
_abi_test_get_and_clear_direction_flag:
L_abi_test_get_and_clear_direction_flag_begin:
pushfl
popl %eax
andl $1024,%eax
shrl $10,%eax
cld
ret
.globl _abi_test_set_direction_flag
.private_extern _abi_test_set_direction_flag
.align 4
_abi_test_set_direction_flag:
L_abi_test_set_direction_flag_begin:
std
ret
.globl _abi_test_clobber_eax
.private_extern _abi_test_clobber_eax
.align 4
_abi_test_clobber_eax:
L_abi_test_clobber_eax_begin:
xorl %eax,%eax
ret
.globl _abi_test_clobber_ebx
.private_extern _abi_test_clobber_ebx
.align 4
_abi_test_clobber_ebx:
L_abi_test_clobber_ebx_begin:
xorl %ebx,%ebx
ret
.globl _abi_test_clobber_ecx
.private_extern _abi_test_clobber_ecx
.align 4
_abi_test_clobber_ecx:
L_abi_test_clobber_ecx_begin:
xorl %ecx,%ecx
ret
.globl _abi_test_clobber_edx
.private_extern _abi_test_clobber_edx
.align 4
_abi_test_clobber_edx:
L_abi_test_clobber_edx_begin:
xorl %edx,%edx
ret
.globl _abi_test_clobber_edi
.private_extern _abi_test_clobber_edi
.align 4
_abi_test_clobber_edi:
L_abi_test_clobber_edi_begin:
xorl %edi,%edi
ret
.globl _abi_test_clobber_esi
.private_extern _abi_test_clobber_esi
.align 4
_abi_test_clobber_esi:
L_abi_test_clobber_esi_begin:
xorl %esi,%esi
ret
.globl _abi_test_clobber_ebp
.private_extern _abi_test_clobber_ebp
.align 4
_abi_test_clobber_ebp:
L_abi_test_clobber_ebp_begin:
xorl %ebp,%ebp
ret
.globl _abi_test_clobber_xmm0
.private_extern _abi_test_clobber_xmm0
.align 4
_abi_test_clobber_xmm0:
L_abi_test_clobber_xmm0_begin:
pxor %xmm0,%xmm0
ret
.globl _abi_test_clobber_xmm1
.private_extern _abi_test_clobber_xmm1
.align 4
_abi_test_clobber_xmm1:
L_abi_test_clobber_xmm1_begin:
pxor %xmm1,%xmm1
ret
.globl _abi_test_clobber_xmm2
.private_extern _abi_test_clobber_xmm2
.align 4
_abi_test_clobber_xmm2:
L_abi_test_clobber_xmm2_begin:
pxor %xmm2,%xmm2
ret
.globl _abi_test_clobber_xmm3
.private_extern _abi_test_clobber_xmm3
.align 4
_abi_test_clobber_xmm3:
L_abi_test_clobber_xmm3_begin:
pxor %xmm3,%xmm3
ret
.globl _abi_test_clobber_xmm4
.private_extern _abi_test_clobber_xmm4
.align 4
_abi_test_clobber_xmm4:
L_abi_test_clobber_xmm4_begin:
pxor %xmm4,%xmm4
ret
.globl _abi_test_clobber_xmm5
.private_extern _abi_test_clobber_xmm5
.align 4
_abi_test_clobber_xmm5:
L_abi_test_clobber_xmm5_begin:
pxor %xmm5,%xmm5
ret
.globl _abi_test_clobber_xmm6
.private_extern _abi_test_clobber_xmm6
.align 4
_abi_test_clobber_xmm6:
L_abi_test_clobber_xmm6_begin:
pxor %xmm6,%xmm6
ret
.globl _abi_test_clobber_xmm7
.private_extern _abi_test_clobber_xmm7
.align 4
_abi_test_clobber_xmm7:
L_abi_test_clobber_xmm7_begin:
pxor %xmm7,%xmm7
ret
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/sha256-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
.globl _sha256_block_data_order_nohw
.private_extern _sha256_block_data_order_nohw
.align 4
_sha256_block_data_order_nohw:
L_sha256_block_data_order_nohw_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call L000pic_point
L000pic_point:
popl %ebp
leal LK256-L000pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
L001no_xmm:
subl %edi,%eax
cmpl $256,%eax
jae L002unrolled
jmp L003loop
.align 4,0x90
L003loop:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
bswap %eax
movl 12(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
bswap %eax
movl 28(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
bswap %eax
movl 44(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
bswap %eax
movl 60(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
addl $64,%edi
leal -36(%esp),%esp
movl %edi,104(%esp)
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,8(%esp)
xorl %ecx,%ebx
movl %ecx,12(%esp)
movl %edi,16(%esp)
movl %ebx,(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edi
movl %ebx,24(%esp)
movl %ecx,28(%esp)
movl %edi,32(%esp)
.align 4,0x90
L00400_15:
movl %edx,%ecx
movl 24(%esp),%esi
rorl $14,%ecx
movl 28(%esp),%edi
xorl %edx,%ecx
xorl %edi,%esi
movl 96(%esp),%ebx
rorl $5,%ecx
andl %edx,%esi
movl %edx,20(%esp)
xorl %ecx,%edx
addl 32(%esp),%ebx
xorl %edi,%esi
rorl $6,%edx
movl %eax,%ecx
addl %esi,%ebx
rorl $9,%ecx
addl %edx,%ebx
movl 8(%esp),%edi
xorl %eax,%ecx
movl %eax,4(%esp)
leal -4(%esp),%esp
rorl $11,%ecx
movl (%ebp),%esi
xorl %eax,%ecx
movl 20(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %esi,%ebx
movl %eax,(%esp)
addl %ebx,%edx
andl 4(%esp),%eax
addl %ecx,%ebx
xorl %edi,%eax
addl $4,%ebp
addl %ebx,%eax
cmpl $3248222580,%esi
jne L00400_15
movl 156(%esp),%ecx
jmp L00516_63
.align 4,0x90
L00516_63:
movl %ecx,%ebx
movl 104(%esp),%esi
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 160(%esp),%ebx
shrl $10,%edi
addl 124(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 24(%esp),%esi
rorl $14,%ecx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %edx,%ecx
xorl %edi,%esi
movl %ebx,96(%esp)
rorl $5,%ecx
andl %edx,%esi
movl %edx,20(%esp)
xorl %ecx,%edx
addl 32(%esp),%ebx
xorl %edi,%esi
rorl $6,%edx
movl %eax,%ecx
addl %esi,%ebx
rorl $9,%ecx
addl %edx,%ebx
movl 8(%esp),%edi
xorl %eax,%ecx
movl %eax,4(%esp)
leal -4(%esp),%esp
rorl $11,%ecx
movl (%ebp),%esi
xorl %eax,%ecx
movl 20(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %esi,%ebx
movl %eax,(%esp)
addl %ebx,%edx
andl 4(%esp),%eax
addl %ecx,%ebx
xorl %edi,%eax
movl 156(%esp),%ecx
addl $4,%ebp
addl %ebx,%eax
cmpl $3329325298,%esi
jne L00516_63
movl 356(%esp),%esi
movl 8(%esp),%ebx
movl 16(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl 24(%esp),%eax
movl 28(%esp),%ebx
movl 32(%esp),%ecx
movl 360(%esp),%edi
addl 16(%esi),%edx
addl 20(%esi),%eax
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
movl %eax,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
leal 356(%esp),%esp
subl $256,%ebp
cmpl 8(%esp),%edi
jb L003loop
movl 12(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 6,0x90
LK256:
.long 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298
.long 66051,67438087,134810123,202182159
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
.align 4,0x90
L002unrolled:
leal -96(%esp),%esp
movl (%esi),%eax
movl 4(%esi),%ebp
movl 8(%esi),%ecx
movl 12(%esi),%ebx
movl %ebp,4(%esp)
xorl %ecx,%ebp
movl %ecx,8(%esp)
movl %ebx,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %ebx,20(%esp)
movl %ecx,24(%esp)
movl %esi,28(%esp)
jmp L006grand_loop
.align 4,0x90
L006grand_loop:
movl (%edi),%ebx
movl 4(%edi),%ecx
bswap %ebx
movl 8(%edi),%esi
bswap %ecx
movl %ebx,32(%esp)
bswap %esi
movl %ecx,36(%esp)
movl %esi,40(%esp)
movl 12(%edi),%ebx
movl 16(%edi),%ecx
bswap %ebx
movl 20(%edi),%esi
bswap %ecx
movl %ebx,44(%esp)
bswap %esi
movl %ecx,48(%esp)
movl %esi,52(%esp)
movl 24(%edi),%ebx
movl 28(%edi),%ecx
bswap %ebx
movl 32(%edi),%esi
bswap %ecx
movl %ebx,56(%esp)
bswap %esi
movl %ecx,60(%esp)
movl %esi,64(%esp)
movl 36(%edi),%ebx
movl 40(%edi),%ecx
bswap %ebx
movl 44(%edi),%esi
bswap %ecx
movl %ebx,68(%esp)
bswap %esi
movl %ecx,72(%esp)
movl %esi,76(%esp)
movl 48(%edi),%ebx
movl 52(%edi),%ecx
bswap %ebx
movl 56(%edi),%esi
bswap %ecx
movl %ebx,80(%esp)
bswap %esi
movl %ecx,84(%esp)
movl %esi,88(%esp)
movl 60(%edi),%ebx
addl $64,%edi
bswap %ebx
movl %edi,100(%esp)
movl %ebx,92(%esp)
movl %edx,%ecx
movl 20(%esp),%esi
rorl $14,%edx
movl 24(%esp),%edi
xorl %ecx,%edx
movl 32(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1116352408(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 16(%esp),%ecx
rorl $14,%edx
movl 20(%esp),%edi
xorl %esi,%edx
movl 36(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1899447441(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 12(%esp),%esi
rorl $14,%edx
movl 16(%esp),%edi
xorl %ecx,%edx
movl 40(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3049323471(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 8(%esp),%ecx
rorl $14,%edx
movl 12(%esp),%edi
xorl %esi,%edx
movl 44(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3921009573(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 4(%esp),%esi
rorl $14,%edx
movl 8(%esp),%edi
xorl %ecx,%edx
movl 48(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 961987163(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl (%esp),%ecx
rorl $14,%edx
movl 4(%esp),%edi
xorl %esi,%edx
movl 52(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1508970993(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 28(%esp),%esi
rorl $14,%edx
movl (%esp),%edi
xorl %ecx,%edx
movl 56(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2453635748(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 24(%esp),%ecx
rorl $14,%edx
movl 28(%esp),%edi
xorl %esi,%edx
movl 60(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2870763221(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 20(%esp),%esi
rorl $14,%edx
movl 24(%esp),%edi
xorl %ecx,%edx
movl 64(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3624381080(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 16(%esp),%ecx
rorl $14,%edx
movl 20(%esp),%edi
xorl %esi,%edx
movl 68(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 310598401(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 12(%esp),%esi
rorl $14,%edx
movl 16(%esp),%edi
xorl %ecx,%edx
movl 72(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 607225278(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 8(%esp),%ecx
rorl $14,%edx
movl 12(%esp),%edi
xorl %esi,%edx
movl 76(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1426881987(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 4(%esp),%esi
rorl $14,%edx
movl 8(%esp),%edi
xorl %ecx,%edx
movl 80(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1925078388(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl (%esp),%ecx
rorl $14,%edx
movl 4(%esp),%edi
xorl %esi,%edx
movl 84(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2162078206(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 28(%esp),%esi
rorl $14,%edx
movl (%esp),%edi
xorl %ecx,%edx
movl 88(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2614888103(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 24(%esp),%ecx
rorl $14,%edx
movl 28(%esp),%edi
xorl %esi,%edx
movl 92(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3248222580(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3835390401(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 4022224774(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 264347078(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 604807628(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 770255983(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1249150122(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1555081692(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1996064986(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2554220882(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2821834349(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2952996808(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3210313671(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3336571891(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3584528711(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,88(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 113926993(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,92(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 338241895(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 666307205(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 773529912(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1294757372(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1396182291(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1695183700(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1986661051(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2177026350(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2456956037(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2730485921(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2820302411(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3259730800(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3345764771(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3516065817(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3600352804(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,88(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 4094571909(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,92(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 275423344(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 430227734(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 506948616(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 659060556(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 883997877(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 958139571(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1322822218(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1537002063(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1747873779(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1955562222(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2024104815(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2227730452(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2361852424(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2428436474(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2756734187(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3204031479(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3329325298(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
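# End of the 64 scalar rounds for this block: fold the working variables back
# into the hash state (the context pointer appears to live at 96(%esp)), then
# reload the input pointer from 100(%esp) and loop while it stays below the
# limit kept at 104(%esp).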
movl 96(%esp),%esi
xorl %edi,%ebp
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebp
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebp,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebp,4(%esp)
xorl %edi,%ebp
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ebx
movl 28(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ebx,24(%esp)
movl %ecx,28(%esp)
cmpl 104(%esp),%edi
jb L006grand_loop
movl 108(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
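# sha256_block_data_order_ssse3: SSSE3 code path of the 32-bit SHA-256 block
# function. The cdecl arguments, as seen after the four pushes below, are
# the state pointer at 20(%esp), the input pointer at 24(%esp) and the count
# of 64-byte blocks at 28(%esp).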
.globl _sha256_block_data_order_ssse3
.private_extern _sha256_block_data_order_ssse3
.align 4
_sha256_block_data_order_ssse3:
L_sha256_block_data_order_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call L007pic_point
L007pic_point:
popl %ebp
leal LK256-L007pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal -96(%esp),%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,4(%esp)
xorl %ecx,%ebx
movl %ecx,8(%esp)
movl %edi,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%edi
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ecx,24(%esp)
movl %esi,28(%esp)
movdqa 256(%ebp),%xmm7
jmp L008grand_ssse3
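# Per-block outer loop: load the next 64 input bytes, byte-swap them with
# pshufb (the mask in %xmm7 appears to come from just past the K256 table,
# at 256(%ebp)), and pre-add the first sixteen round constants into the
# schedule slots at 32..92(%esp).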
.align 4,0x90
L008grand_ssse3:
movdqu (%edi),%xmm0
movdqu 16(%edi),%xmm1
movdqu 32(%edi),%xmm2
movdqu 48(%edi),%xmm3
addl $64,%edi
.byte 102,15,56,0,199 # pshufb %xmm7,%xmm0 (byte-encoded SSSE3 instruction)
movl %edi,100(%esp)
.byte 102,15,56,0,207 # pshufb %xmm7,%xmm1 (byte-encoded)
movdqa (%ebp),%xmm4
.byte 102,15,56,0,215 # pshufb %xmm7,%xmm2 (byte-encoded)
movdqa 16(%ebp),%xmm5
paddd %xmm0,%xmm4
.byte 102,15,56,0,223 # pshufb %xmm7,%xmm3 (byte-encoded)
movdqa 32(%ebp),%xmm6
paddd %xmm1,%xmm5
movdqa 48(%ebp),%xmm7
movdqa %xmm4,32(%esp)
paddd %xmm2,%xmm6
movdqa %xmm5,48(%esp)
paddd %xmm3,%xmm7
movdqa %xmm6,64(%esp)
movdqa %xmm7,80(%esp)
jmp L009ssse3_00_47
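# Rounds 0-47: each pass of this loop expands the next sixteen message words
# in the XMM registers while the interleaved scalar code runs the SHA-256
# round function from the schedule staged at 32..92(%esp); the .byte
# sequences are byte-encoded SSSE3 instructions (decoded in the comments on
# those lines).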
.align 4,0x90
L009ssse3_00_47:
addl $64,%ebp
movl %edx,%ecx
movdqa %xmm1,%xmm4
rorl $14,%edx
movl 20(%esp),%esi
movdqa %xmm3,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
.byte 102,15,58,15,224,4 # palignr $4,%xmm0,%xmm4 (byte-encoded)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,250,4 # palignr $4,%xmm2,%xmm7 (byte-encoded)
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 4(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm0
movl %eax,(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm3,%xmm7
xorl %esi,%ecx
addl 32(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl 16(%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,12(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm0
movl %ebx,28(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 36(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm0
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
pshufd $80,%xmm0,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa (%ebp),%xmm6
andl %ecx,%esi
movl %ecx,4(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm0
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
paddd %xmm0,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movdqa %xmm6,32(%esp)
movl %edx,%ecx
movdqa %xmm2,%xmm4
rorl $14,%edx
movl 4(%esp),%esi
movdqa %xmm0,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
.byte 102,15,58,15,225,4 # palignr $4,%xmm1,%xmm4 (byte-encoded)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,251,4 # palignr $4,%xmm3,%xmm7 (byte-encoded)
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 20(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm1
movl %eax,16(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm0,%xmm7
xorl %esi,%ecx
addl 48(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl (%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,28(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm1
movl %ebx,12(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 52(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm1
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
pshufd $80,%xmm1,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 16(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,20(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm1
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
paddd %xmm1,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movdqa %xmm6,48(%esp)
movl %edx,%ecx
movdqa %xmm3,%xmm4
rorl $14,%edx
movl 20(%esp),%esi
movdqa %xmm1,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
.byte 102,15,58,15,226,4 # palignr $4,%xmm2,%xmm4 (byte-encoded)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,248,4 # palignr $4,%xmm0,%xmm7 (byte-encoded)
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 4(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm2
movl %eax,(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm1,%xmm7
xorl %esi,%ecx
addl 64(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl 16(%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,12(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm2
movl %ebx,28(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 68(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm2
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
pshufd $80,%xmm2,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 32(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,4(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm2
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
paddd %xmm2,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movdqa %xmm6,64(%esp)
movl %edx,%ecx
movdqa %xmm0,%xmm4
rorl $14,%edx
movl 4(%esp),%esi
movdqa %xmm2,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
.byte 102,15,58,15,227,4 # palignr $4,%xmm3,%xmm4 (byte-encoded)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,249,4 # palignr $4,%xmm1,%xmm7 (byte-encoded)
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 20(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm3
movl %eax,16(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm2,%xmm7
xorl %esi,%ecx
addl 80(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl (%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,28(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm3
movl %ebx,12(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 84(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm3
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
pshufd $80,%xmm3,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 48(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,20(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm3
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
paddd %xmm3,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movdqa %xmm6,80(%esp)
cmpl $66051,64(%ebp)
jne L009ssse3_00_47
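# The schedule loop above exits once 64(%ebp) reads 0x00010203 (66051), i.e.
# once %ebp appears to have walked past the K256 constants into the byte-swap
# mask. Rounds 48-63 below need no further message expansion and run as plain
# scalar rounds over the schedule already staged at 32..92(%esp).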
movl %edx,%ecx
rorl $14,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 32(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 36(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 48(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 52(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 64(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 68(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 80(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 84(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
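# End of the SSSE3 rounds for this block: add the working variables back into
# the state at 96(%esp), reload the byte-swap mask from 64(%ebp) and rewind
# %ebp by 192 to the start of the K256 table, then loop while the input
# pointer at 100(%esp) stays below the limit at 104(%esp).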
movl 96(%esp),%esi
xorl %edi,%ebx
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebx,4(%esp)
xorl %edi,%ebx
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %edi,20(%esp)
movl 28(%esp),%edi
movl %ecx,24(%esi)
addl 28(%esi),%edi
movl %ecx,24(%esp)
movl %edi,28(%esi)
movl %edi,28(%esp)
movl 100(%esp),%edi
movdqa 64(%ebp),%xmm7
subl $192,%ebp
cmpl 104(%esp),%edi
jb L008grand_ssse3
movl 108(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
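# sha256_block_data_order_avx: AVX code path with the same stack layout and
# calling convention as the SSSE3 routine above; the message schedule uses
# VEX-encoded vpshufb/vpalignr/vpsrld/vpsrlq and the scalar rotates are
# written as shrdl.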
.globl _sha256_block_data_order_avx
.private_extern _sha256_block_data_order_avx
.align 4
_sha256_block_data_order_avx:
L_sha256_block_data_order_avx_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call L010pic_point
L010pic_point:
popl %ebp
leal LK256-L010pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal -96(%esp),%esp
vzeroall
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,4(%esp)
xorl %ecx,%ebx
movl %ecx,8(%esp)
movl %edi,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%edi
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ecx,24(%esp)
movl %esi,28(%esp)
vmovdqa 256(%ebp),%xmm7
jmp L011grand_avx
.align 5,0x90
L011grand_avx:
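# One 64-byte block per iteration: load the message, byte-swap it with the
# mask in %xmm7, pre-add the first 16 round constants from K256 and stash
# W+K at 32-80(%esp).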
vmovdqu (%edi),%xmm0
vmovdqu 16(%edi),%xmm1
vmovdqu 32(%edi),%xmm2
vmovdqu 48(%edi),%xmm3
addl $64,%edi
vpshufb %xmm7,%xmm0,%xmm0
movl %edi,100(%esp)
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd (%ebp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 16(%ebp),%xmm1,%xmm5
vpaddd 32(%ebp),%xmm2,%xmm6
vpaddd 48(%ebp),%xmm3,%xmm7
vmovdqa %xmm4,32(%esp)
vmovdqa %xmm5,48(%esp)
vmovdqa %xmm6,64(%esp)
vmovdqa %xmm7,80(%esp)
jmp L012avx_00_47
.align 4,0x90
L012avx_00_47:
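# Rounds 0-47 run in three passes of 16: the AVX code interleaved with the
# scalar rounds expands the next 16 message words (sigma0/sigma1) and stores
# their W+K values at 32-80(%esp) for the following pass.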
addl $64,%ebp
vpalignr $4,%xmm0,%xmm1,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
vpalignr $4,%xmm2,%xmm3,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
vpaddd %xmm7,%xmm0,%xmm0
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
vpshufd $250,%xmm3,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 32(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
vpaddd %xmm4,%xmm0,%xmm0
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 36(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
vpaddd %xmm7,%xmm0,%xmm0
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm0,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 40(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm0,%xmm0
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
vpaddd (%ebp),%xmm0,%xmm6
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,32(%esp)
vpalignr $4,%xmm1,%xmm2,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
vpalignr $4,%xmm3,%xmm0,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
vpaddd %xmm7,%xmm1,%xmm1
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
vpshufd $250,%xmm0,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 48(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
vpaddd %xmm4,%xmm1,%xmm1
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 52(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
vpaddd %xmm7,%xmm1,%xmm1
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm1,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 56(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm1,%xmm1
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
vpaddd 16(%ebp),%xmm1,%xmm6
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,48(%esp)
vpalignr $4,%xmm2,%xmm3,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
vpalignr $4,%xmm0,%xmm1,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
vpaddd %xmm7,%xmm2,%xmm2
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
vpshufd $250,%xmm1,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 64(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
vpaddd %xmm4,%xmm2,%xmm2
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 68(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
vpaddd %xmm7,%xmm2,%xmm2
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm2,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 72(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm2,%xmm2
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
vpaddd 32(%ebp),%xmm2,%xmm6
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,64(%esp)
vpalignr $4,%xmm3,%xmm0,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
vpalignr $4,%xmm1,%xmm2,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
vpaddd %xmm7,%xmm3,%xmm3
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
vpshufd $250,%xmm2,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 80(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
vpaddd %xmm4,%xmm3,%xmm3
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 84(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
vpaddd %xmm7,%xmm3,%xmm3
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm3,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 88(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm3,%xmm3
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
vpaddd 48(%ebp),%xmm3,%xmm6
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,80(%esp)
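# 66051 = 0x00010203, the first word of the byte-swap mask that follows K256;
# once 64(%ebp) reaches it, all 64 round constants have been consumed.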
cmpl $66051,64(%ebp)
jne L012avx_00_47
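# Rounds 48-63: the W+K values for the last 16 rounds were already stored at
# 32-92(%esp) by the final pass above, so no further schedule expansion is needed.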
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 32(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 36(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 48(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 52(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 64(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 68(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 80(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 84(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
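# End of the 64 rounds: add the working values back into the state at the ctx
# pointer (96(%esp)), reload the byte-swap mask, rewind %ebp to K256 and loop
# while the input pointer (100(%esp)) is below the end pointer (104(%esp)).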
movl 96(%esp),%esi
xorl %edi,%ebx
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebx,4(%esp)
xorl %edi,%ebx
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %edi,20(%esp)
movl 28(%esp),%edi
movl %ecx,24(%esi)
addl 28(%esi),%edi
movl %ecx,24(%esp)
movl %edi,28(%esi)
movl %edi,28(%esp)
movl 100(%esp),%edi
vmovdqa 64(%ebp),%xmm7
subl $192,%ebp
cmpl 104(%esp),%edi
jb L011grand_avx
movl 108(%esp),%esp
vzeroall
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/sha1-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
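# sha1_block_data_order_nohw(state, data, num): plain integer SHA-1 compression
# for "num" 64-byte blocks.  Arguments sit at 20/24/28(%esp) after the pushes.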
.globl _sha1_block_data_order_nohw
.private_extern _sha1_block_data_order_nohw
.align 4
_sha1_block_data_order_nohw:
L_sha1_block_data_order_nohw_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%ebp
movl 24(%esp),%esi
movl 28(%esp),%eax
subl $76,%esp
shll $6,%eax
addl %esi,%eax
movl %eax,104(%esp)
movl 16(%ebp),%edi
jmp L000loop
.align 4,0x90
L000loop:
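# One 64-byte block per iteration: byte-swap the 16 message words onto the
# stack at 0-60(%esp), then run the 80 rounds below.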
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,(%esp)
movl %ebx,4(%esp)
movl %ecx,8(%esp)
movl %edx,12(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,16(%esp)
movl %ebx,20(%esp)
movl %ecx,24(%esp)
movl %edx,28(%esp)
movl 32(%esi),%eax
movl 36(%esi),%ebx
movl 40(%esi),%ecx
movl 44(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,40(%esp)
movl %edx,44(%esp)
movl 48(%esi),%eax
movl 52(%esi),%ebx
movl 56(%esi),%ecx
movl 60(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,48(%esp)
movl %ebx,52(%esp)
movl %ecx,56(%esp)
movl %edx,60(%esp)
movl %esi,100(%esp)
movl (%ebp),%eax
movl 4(%ebp),%ebx
movl 8(%ebp),%ecx
movl 12(%ebp),%edx
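# Rounds 0-15: f(b,c,d) = (b AND c) OR (NOT b AND d), K = 0x5a827999
# (1518500249); W[t] comes straight from the byte-swapped block.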
# 00_15 0
movl %ecx,%esi
movl %eax,%ebp
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl (%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
# 00_15 1
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 4(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
# 00_15 2
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 8(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
# 00_15 3
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 12(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
addl %ecx,%ebp
# 00_15 4
movl %edi,%ebx
movl %ebp,%ecx
roll $5,%ebp
xorl %esi,%ebx
addl %eax,%ebp
movl 16(%esp),%eax
andl %edx,%ebx
rorl $2,%edx
xorl %esi,%ebx
leal 1518500249(%ebp,%eax,1),%ebp
addl %ebx,%ebp
# 00_15 5
movl %edx,%eax
movl %ebp,%ebx
roll $5,%ebp
xorl %edi,%eax
addl %esi,%ebp
movl 20(%esp),%esi
andl %ecx,%eax
rorl $2,%ecx
xorl %edi,%eax
leal 1518500249(%ebp,%esi,1),%ebp
addl %eax,%ebp
# 00_15 6
movl %ecx,%esi
movl %ebp,%eax
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl 24(%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
# 00_15 7
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 28(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
# 00_15 8
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 32(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
# 00_15 9
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 36(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
addl %ecx,%ebp
# 00_15 10
movl %edi,%ebx
movl %ebp,%ecx
roll $5,%ebp
xorl %esi,%ebx
addl %eax,%ebp
movl 40(%esp),%eax
andl %edx,%ebx
rorl $2,%edx
xorl %esi,%ebx
leal 1518500249(%ebp,%eax,1),%ebp
addl %ebx,%ebp
# 00_15 11
movl %edx,%eax
movl %ebp,%ebx
roll $5,%ebp
xorl %edi,%eax
addl %esi,%ebp
movl 44(%esp),%esi
andl %ecx,%eax
rorl $2,%ecx
xorl %edi,%eax
leal 1518500249(%ebp,%esi,1),%ebp
addl %eax,%ebp
# 00_15 12
movl %ecx,%esi
movl %ebp,%eax
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl 48(%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
# 00_15 13
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 52(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
# 00_15 14
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 56(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
# 00_15 15
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 60(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
movl (%esp),%ebx
addl %ebp,%ecx
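# Rounds 16-19: same f and K, but from here on W[t] is computed as
# ROTL1(W[t-3] XOR W[t-8] XOR W[t-14] XOR W[t-16]) in place on the stack.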
# 16_19 16
movl %edi,%ebp
xorl 8(%esp),%ebx
xorl %esi,%ebp
xorl 32(%esp),%ebx
andl %edx,%ebp
xorl 52(%esp),%ebx
roll $1,%ebx
xorl %esi,%ebp
addl %ebp,%eax
movl %ecx,%ebp
rorl $2,%edx
movl %ebx,(%esp)
roll $5,%ebp
leal 1518500249(%ebx,%eax,1),%ebx
movl 4(%esp),%eax
addl %ebp,%ebx
# 16_19 17
movl %edx,%ebp
xorl 12(%esp),%eax
xorl %edi,%ebp
xorl 36(%esp),%eax
andl %ecx,%ebp
xorl 56(%esp),%eax
roll $1,%eax
xorl %edi,%ebp
addl %ebp,%esi
movl %ebx,%ebp
rorl $2,%ecx
movl %eax,4(%esp)
roll $5,%ebp
leal 1518500249(%eax,%esi,1),%eax
movl 8(%esp),%esi
addl %ebp,%eax
# 16_19 18
movl %ecx,%ebp
xorl 16(%esp),%esi
xorl %edx,%ebp
xorl 40(%esp),%esi
andl %ebx,%ebp
xorl 60(%esp),%esi
roll $1,%esi
xorl %edx,%ebp
addl %ebp,%edi
movl %eax,%ebp
rorl $2,%ebx
movl %esi,8(%esp)
roll $5,%ebp
leal 1518500249(%esi,%edi,1),%esi
movl 12(%esp),%edi
addl %ebp,%esi
# 16_19 19
movl %ebx,%ebp
xorl 20(%esp),%edi
xorl %ecx,%ebp
xorl 44(%esp),%edi
andl %eax,%ebp
xorl (%esp),%edi
roll $1,%edi
xorl %ecx,%ebp
addl %ebp,%edx
movl %esi,%ebp
rorl $2,%eax
movl %edi,12(%esp)
roll $5,%ebp
leal 1518500249(%edi,%edx,1),%edi
movl 16(%esp),%edx
addl %ebp,%edi
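# Rounds 20-39: f(b,c,d) = b XOR c XOR d, K = 0x6ed9eba1 (1859775393).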
# 20_39 20
movl %esi,%ebp
xorl 24(%esp),%edx
xorl %eax,%ebp
xorl 48(%esp),%edx
xorl %ebx,%ebp
xorl 4(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,16(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 20(%esp),%ecx
addl %ebp,%edx
# 20_39 21
movl %edi,%ebp
xorl 28(%esp),%ecx
xorl %esi,%ebp
xorl 52(%esp),%ecx
xorl %eax,%ebp
xorl 8(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,20(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 24(%esp),%ebx
addl %ebp,%ecx
# 20_39 22
movl %edx,%ebp
xorl 32(%esp),%ebx
xorl %edi,%ebp
xorl 56(%esp),%ebx
xorl %esi,%ebp
xorl 12(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,24(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 28(%esp),%eax
addl %ebp,%ebx
# 20_39 23
movl %ecx,%ebp
xorl 36(%esp),%eax
xorl %edx,%ebp
xorl 60(%esp),%eax
xorl %edi,%ebp
xorl 16(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,28(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 32(%esp),%esi
addl %ebp,%eax
# 20_39 24
movl %ebx,%ebp
xorl 40(%esp),%esi
xorl %ecx,%ebp
xorl (%esp),%esi
xorl %edx,%ebp
xorl 20(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,32(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 36(%esp),%edi
addl %ebp,%esi
# 20_39 25
movl %eax,%ebp
xorl 44(%esp),%edi
xorl %ebx,%ebp
xorl 4(%esp),%edi
xorl %ecx,%ebp
xorl 24(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,36(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl 40(%esp),%edx
addl %ebp,%edi
# 20_39 26
movl %esi,%ebp
xorl 48(%esp),%edx
xorl %eax,%ebp
xorl 8(%esp),%edx
xorl %ebx,%ebp
xorl 28(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,40(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 44(%esp),%ecx
addl %ebp,%edx
# 20_39 27
movl %edi,%ebp
xorl 52(%esp),%ecx
xorl %esi,%ebp
xorl 12(%esp),%ecx
xorl %eax,%ebp
xorl 32(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,44(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 48(%esp),%ebx
addl %ebp,%ecx
# 20_39 28
movl %edx,%ebp
xorl 56(%esp),%ebx
xorl %edi,%ebp
xorl 16(%esp),%ebx
xorl %esi,%ebp
xorl 36(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,48(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 52(%esp),%eax
addl %ebp,%ebx
# 20_39 29
movl %ecx,%ebp
xorl 60(%esp),%eax
xorl %edx,%ebp
xorl 20(%esp),%eax
xorl %edi,%ebp
xorl 40(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,52(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 56(%esp),%esi
addl %ebp,%eax
# 20_39 30
movl %ebx,%ebp
xorl (%esp),%esi
xorl %ecx,%ebp
xorl 24(%esp),%esi
xorl %edx,%ebp
xorl 44(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,56(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 60(%esp),%edi
addl %ebp,%esi
# 20_39 31
movl %eax,%ebp
xorl 4(%esp),%edi
xorl %ebx,%ebp
xorl 28(%esp),%edi
xorl %ecx,%ebp
xorl 48(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,60(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl (%esp),%edx
addl %ebp,%edi
# 20_39 32
movl %esi,%ebp
xorl 8(%esp),%edx
xorl %eax,%ebp
xorl 32(%esp),%edx
xorl %ebx,%ebp
xorl 52(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 4(%esp),%ecx
addl %ebp,%edx
# 20_39 33
movl %edi,%ebp
xorl 12(%esp),%ecx
xorl %esi,%ebp
xorl 36(%esp),%ecx
xorl %eax,%ebp
xorl 56(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,4(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 8(%esp),%ebx
addl %ebp,%ecx
# 20_39 34
movl %edx,%ebp
xorl 16(%esp),%ebx
xorl %edi,%ebp
xorl 40(%esp),%ebx
xorl %esi,%ebp
xorl 60(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,8(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 12(%esp),%eax
addl %ebp,%ebx
# 20_39 35
movl %ecx,%ebp
xorl 20(%esp),%eax
xorl %edx,%ebp
xorl 44(%esp),%eax
xorl %edi,%ebp
xorl (%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,12(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 16(%esp),%esi
addl %ebp,%eax
# 20_39 36
movl %ebx,%ebp
xorl 24(%esp),%esi
xorl %ecx,%ebp
xorl 48(%esp),%esi
xorl %edx,%ebp
xorl 4(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,16(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 20(%esp),%edi
addl %ebp,%esi
# 20_39 37
movl %eax,%ebp
xorl 28(%esp),%edi
xorl %ebx,%ebp
xorl 52(%esp),%edi
xorl %ecx,%ebp
xorl 8(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,20(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl 24(%esp),%edx
addl %ebp,%edi
# 20_39 38
movl %esi,%ebp
xorl 32(%esp),%edx
xorl %eax,%ebp
xorl 56(%esp),%edx
xorl %ebx,%ebp
xorl 12(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,24(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 28(%esp),%ecx
addl %ebp,%edx
# 20_39 39
movl %edi,%ebp
xorl 36(%esp),%ecx
xorl %esi,%ebp
xorl 60(%esp),%ecx
xorl %eax,%ebp
xorl 16(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,28(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 32(%esp),%ebx
addl %ebp,%ecx
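# Rounds 40-59: f(b,c,d) = (b AND c) OR (b AND d) OR (c AND d),
# K = 0x8f1bbcdc (2400959708).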
# 40_59 40
movl %edi,%ebp
xorl 40(%esp),%ebx
xorl %esi,%ebp
xorl (%esp),%ebx
andl %edx,%ebp
xorl 20(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,32(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 36(%esp),%eax
addl %ebp,%ebx
# 40_59 41
movl %edx,%ebp
xorl 44(%esp),%eax
xorl %edi,%ebp
xorl 4(%esp),%eax
andl %ecx,%ebp
xorl 24(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,36(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 40(%esp),%esi
addl %ebp,%eax
# 40_59 42
movl %ecx,%ebp
xorl 48(%esp),%esi
xorl %edx,%ebp
xorl 8(%esp),%esi
andl %ebx,%ebp
xorl 28(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,40(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 44(%esp),%edi
addl %ebp,%esi
# 40_59 43
movl %ebx,%ebp
xorl 52(%esp),%edi
xorl %ecx,%ebp
xorl 12(%esp),%edi
andl %eax,%ebp
xorl 32(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,44(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 48(%esp),%edx
addl %ebp,%edi
# 40_59 44
movl %eax,%ebp
xorl 56(%esp),%edx
xorl %ebx,%ebp
xorl 16(%esp),%edx
andl %esi,%ebp
xorl 36(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,48(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 52(%esp),%ecx
addl %ebp,%edx
# 40_59 45
movl %esi,%ebp
xorl 60(%esp),%ecx
xorl %eax,%ebp
xorl 20(%esp),%ecx
andl %edi,%ebp
xorl 40(%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,52(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 56(%esp),%ebx
addl %ebp,%ecx
# 40_59 46
movl %edi,%ebp
xorl (%esp),%ebx
xorl %esi,%ebp
xorl 24(%esp),%ebx
andl %edx,%ebp
xorl 44(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,56(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 60(%esp),%eax
addl %ebp,%ebx
# 40_59 47
movl %edx,%ebp
xorl 4(%esp),%eax
xorl %edi,%ebp
xorl 28(%esp),%eax
andl %ecx,%ebp
xorl 48(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,60(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl (%esp),%esi
addl %ebp,%eax
# 40_59 48
movl %ecx,%ebp
xorl 8(%esp),%esi
xorl %edx,%ebp
xorl 32(%esp),%esi
andl %ebx,%ebp
xorl 52(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 4(%esp),%edi
addl %ebp,%esi
# 40_59 49
movl %ebx,%ebp
xorl 12(%esp),%edi
xorl %ecx,%ebp
xorl 36(%esp),%edi
andl %eax,%ebp
xorl 56(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,4(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 8(%esp),%edx
addl %ebp,%edi
# 40_59 50
movl %eax,%ebp
xorl 16(%esp),%edx
xorl %ebx,%ebp
xorl 40(%esp),%edx
andl %esi,%ebp
xorl 60(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,8(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 12(%esp),%ecx
addl %ebp,%edx
# 40_59 51
movl %esi,%ebp
xorl 20(%esp),%ecx
xorl %eax,%ebp
xorl 44(%esp),%ecx
andl %edi,%ebp
xorl (%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,12(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 16(%esp),%ebx
addl %ebp,%ecx
# 40_59 52
movl %edi,%ebp
xorl 24(%esp),%ebx
xorl %esi,%ebp
xorl 48(%esp),%ebx
andl %edx,%ebp
xorl 4(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,16(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 20(%esp),%eax
addl %ebp,%ebx
# 40_59 53
movl %edx,%ebp
xorl 28(%esp),%eax
xorl %edi,%ebp
xorl 52(%esp),%eax
andl %ecx,%ebp
xorl 8(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,20(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 24(%esp),%esi
addl %ebp,%eax
# 40_59 54
movl %ecx,%ebp
xorl 32(%esp),%esi
xorl %edx,%ebp
xorl 56(%esp),%esi
andl %ebx,%ebp
xorl 12(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,24(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 28(%esp),%edi
addl %ebp,%esi
# 40_59 55
movl %ebx,%ebp
xorl 36(%esp),%edi
xorl %ecx,%ebp
xorl 60(%esp),%edi
andl %eax,%ebp
xorl 16(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,28(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 32(%esp),%edx
addl %ebp,%edi
# 40_59 56
movl %eax,%ebp
xorl 40(%esp),%edx
xorl %ebx,%ebp
xorl (%esp),%edx
andl %esi,%ebp
xorl 20(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,32(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 36(%esp),%ecx
addl %ebp,%edx
# 40_59 57
movl %esi,%ebp
xorl 44(%esp),%ecx
xorl %eax,%ebp
xorl 4(%esp),%ecx
andl %edi,%ebp
xorl 24(%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,36(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 40(%esp),%ebx
addl %ebp,%ecx
# 40_59 58
movl %edi,%ebp
xorl 48(%esp),%ebx
xorl %esi,%ebp
xorl 8(%esp),%ebx
andl %edx,%ebp
xorl 28(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,40(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 44(%esp),%eax
addl %ebp,%ebx
# 40_59 59
movl %edx,%ebp
xorl 52(%esp),%eax
xorl %edi,%ebp
xorl 12(%esp),%eax
andl %ecx,%ebp
xorl 32(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,44(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 48(%esp),%esi
addl %ebp,%eax
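# Rounds 60-79: f(b,c,d) = b XOR c XOR d, K = 0xca62c1d6 (3395469782).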
# 20_39 60
movl %ebx,%ebp
xorl 56(%esp),%esi
xorl %ecx,%ebp
xorl 16(%esp),%esi
xorl %edx,%ebp
xorl 36(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,48(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 52(%esp),%edi
addl %ebp,%esi
# 20_39 61
movl %eax,%ebp
xorl 60(%esp),%edi
xorl %ebx,%ebp
xorl 20(%esp),%edi
xorl %ecx,%ebp
xorl 40(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,52(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 56(%esp),%edx
addl %ebp,%edi
# 20_39 62
movl %esi,%ebp
xorl (%esp),%edx
xorl %eax,%ebp
xorl 24(%esp),%edx
xorl %ebx,%ebp
xorl 44(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,56(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 60(%esp),%ecx
addl %ebp,%edx
# 20_39 63
movl %edi,%ebp
xorl 4(%esp),%ecx
xorl %esi,%ebp
xorl 28(%esp),%ecx
xorl %eax,%ebp
xorl 48(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,60(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl (%esp),%ebx
addl %ebp,%ecx
# 20_39 64
movl %edx,%ebp
xorl 8(%esp),%ebx
xorl %edi,%ebp
xorl 32(%esp),%ebx
xorl %esi,%ebp
xorl 52(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 4(%esp),%eax
addl %ebp,%ebx
# 20_39 65
movl %ecx,%ebp
xorl 12(%esp),%eax
xorl %edx,%ebp
xorl 36(%esp),%eax
xorl %edi,%ebp
xorl 56(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,4(%esp)
leal 3395469782(%eax,%esi,1),%eax
movl 8(%esp),%esi
addl %ebp,%eax
# 20_39 66
movl %ebx,%ebp
xorl 16(%esp),%esi
xorl %ecx,%ebp
xorl 40(%esp),%esi
xorl %edx,%ebp
xorl 60(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,8(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 12(%esp),%edi
addl %ebp,%esi
# 20_39 67
movl %eax,%ebp
xorl 20(%esp),%edi
xorl %ebx,%ebp
xorl 44(%esp),%edi
xorl %ecx,%ebp
xorl (%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,12(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 16(%esp),%edx
addl %ebp,%edi
# 20_39 68
movl %esi,%ebp
xorl 24(%esp),%edx
xorl %eax,%ebp
xorl 48(%esp),%edx
xorl %ebx,%ebp
xorl 4(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,16(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 20(%esp),%ecx
addl %ebp,%edx
# 20_39 69
movl %edi,%ebp
xorl 28(%esp),%ecx
xorl %esi,%ebp
xorl 52(%esp),%ecx
xorl %eax,%ebp
xorl 8(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,20(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl 24(%esp),%ebx
addl %ebp,%ecx
# 20_39 70
movl %edx,%ebp
xorl 32(%esp),%ebx
xorl %edi,%ebp
xorl 56(%esp),%ebx
xorl %esi,%ebp
xorl 12(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,24(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 28(%esp),%eax
addl %ebp,%ebx
# 20_39 71
movl %ecx,%ebp
xorl 36(%esp),%eax
xorl %edx,%ebp
xorl 60(%esp),%eax
xorl %edi,%ebp
xorl 16(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,28(%esp)
leal 3395469782(%eax,%esi,1),%eax
movl 32(%esp),%esi
addl %ebp,%eax
# 20_39 72
movl %ebx,%ebp
xorl 40(%esp),%esi
xorl %ecx,%ebp
xorl (%esp),%esi
xorl %edx,%ebp
xorl 20(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,32(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 36(%esp),%edi
addl %ebp,%esi
# 20_39 73
movl %eax,%ebp
xorl 44(%esp),%edi
xorl %ebx,%ebp
xorl 4(%esp),%edi
xorl %ecx,%ebp
xorl 24(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,36(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 40(%esp),%edx
addl %ebp,%edi
# 20_39 74
movl %esi,%ebp
xorl 48(%esp),%edx
xorl %eax,%ebp
xorl 8(%esp),%edx
xorl %ebx,%ebp
xorl 28(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,40(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 44(%esp),%ecx
addl %ebp,%edx
# 20_39 75
movl %edi,%ebp
xorl 52(%esp),%ecx
xorl %esi,%ebp
xorl 12(%esp),%ecx
xorl %eax,%ebp
xorl 32(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,44(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl 48(%esp),%ebx
addl %ebp,%ecx
# 20_39 76
movl %edx,%ebp
xorl 56(%esp),%ebx
xorl %edi,%ebp
xorl 16(%esp),%ebx
xorl %esi,%ebp
xorl 36(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,48(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 52(%esp),%eax
addl %ebp,%ebx
# 20_39 77
movl %ecx,%ebp
xorl 60(%esp),%eax
xorl %edx,%ebp
xorl 20(%esp),%eax
xorl %edi,%ebp
xorl 40(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
leal 3395469782(%eax,%esi,1),%eax
movl 56(%esp),%esi
addl %ebp,%eax
# 20_39 78
movl %ebx,%ebp
xorl (%esp),%esi
xorl %ecx,%ebp
xorl 24(%esp),%esi
xorl %edx,%ebp
xorl 44(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
leal 3395469782(%esi,%edi,1),%esi
movl 60(%esp),%edi
addl %ebp,%esi
# 20_39 79
movl %eax,%ebp
xorl 4(%esp),%edi
xorl %ebx,%ebp
xorl 28(%esp),%edi
xorl %ecx,%ebp
xorl 48(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
leal 3395469782(%edi,%edx,1),%edi
addl %ebp,%edi
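# All 80 rounds done: reload the ctx pointer (96(%esp)), add the five working
# values back into the state, advance the data pointer and loop while it is
# below the end pointer at 104(%esp).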
movl 96(%esp),%ebp
movl 100(%esp),%edx
addl (%ebp),%edi
addl 4(%ebp),%esi
addl 8(%ebp),%eax
addl 12(%ebp),%ebx
addl 16(%ebp),%ecx
movl %edi,(%ebp)
addl $64,%edx
movl %esi,4(%ebp)
cmpl 104(%esp),%edx
movl %eax,8(%ebp)
movl %ecx,%edi
movl %ebx,12(%ebp)
movl %edx,%esi
movl %ecx,16(%ebp)
jb L000loop
addl $76,%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
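# sha1_block_data_order_ssse3(state, data, num): SHA-1 with the message
# schedule kept in XMM registers; the K constants and byte-swap mask are
# loaded from the LK_XX_XX table, addressed PIC-relative through %ebp.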
.globl _sha1_block_data_order_ssse3
.private_extern _sha1_block_data_order_ssse3
.align 4
_sha1_block_data_order_ssse3:
L_sha1_block_data_order_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call L001pic_point
L001pic_point:
popl %ebp
leal LK_XX_XX-L001pic_point(%ebp),%ebp
movdqa (%ebp),%xmm7
movdqa 16(%ebp),%xmm0
movdqa 32(%ebp),%xmm1
movdqa 48(%ebp),%xmm2
movdqa 64(%ebp),%xmm6
movl 20(%esp),%edi
movl 24(%esp),%ebp
movl 28(%esp),%edx
movl %esp,%esi
subl $208,%esp
andl $-64,%esp
movdqa %xmm0,112(%esp)
movdqa %xmm1,128(%esp)
movdqa %xmm2,144(%esp)
shll $6,%edx
movdqa %xmm7,160(%esp)
addl %ebp,%edx
movdqa %xmm6,176(%esp)
addl $64,%ebp
movl %edi,192(%esp)
movl %ebp,196(%esp)
movl %edx,200(%esp)
movl %esi,204(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
movl 16(%edi),%edi
movl %ebx,%esi
movdqu -64(%ebp),%xmm0
movdqu -48(%ebp),%xmm1
movdqu -32(%ebp),%xmm2
movdqu -16(%ebp),%xmm3
.byte 102,15,56,0,198
.byte 102,15,56,0,206
.byte 102,15,56,0,214
movdqa %xmm7,96(%esp)
.byte 102,15,56,0,222
paddd %xmm7,%xmm0
paddd %xmm7,%xmm1
paddd %xmm7,%xmm2
movdqa %xmm0,(%esp)
psubd %xmm7,%xmm0
movdqa %xmm1,16(%esp)
psubd %xmm7,%xmm1
movdqa %xmm2,32(%esp)
movl %ecx,%ebp
psubd %xmm7,%xmm2
xorl %edx,%ebp
pshufd $238,%xmm0,%xmm4
andl %ebp,%esi
jmp L002loop
.align 4,0x90
L002loop:
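# Main loop: the 80 scalar rounds are interleaved with SSSE3 code that extends
# the message schedule four words at a time; pre-added W+K values for upcoming
# rounds are staged at 0-48(%esp).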
rorl $2,%ebx
xorl %edx,%esi
movl %eax,%ebp
punpcklqdq %xmm1,%xmm4
movdqa %xmm3,%xmm6
addl (%esp),%edi
xorl %ecx,%ebx
paddd %xmm3,%xmm7
movdqa %xmm0,64(%esp)
roll $5,%eax
addl %esi,%edi
psrldq $4,%xmm6
andl %ebx,%ebp
xorl %ecx,%ebx
pxor %xmm0,%xmm4
addl %eax,%edi
rorl $7,%eax
pxor %xmm2,%xmm6
xorl %ecx,%ebp
movl %edi,%esi
addl 4(%esp),%edx
pxor %xmm6,%xmm4
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm7,48(%esp)
addl %ebp,%edx
andl %eax,%esi
movdqa %xmm4,%xmm0
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
movdqa %xmm4,%xmm6
xorl %ebx,%esi
pslldq $12,%xmm0
paddd %xmm4,%xmm4
movl %edx,%ebp
addl 8(%esp),%ecx
psrld $31,%xmm6
xorl %eax,%edi
roll $5,%edx
movdqa %xmm0,%xmm7
addl %esi,%ecx
andl %edi,%ebp
xorl %eax,%edi
psrld $30,%xmm0
addl %edx,%ecx
rorl $7,%edx
por %xmm6,%xmm4
xorl %eax,%ebp
movl %ecx,%esi
addl 12(%esp),%ebx
pslld $2,%xmm7
xorl %edi,%edx
roll $5,%ecx
pxor %xmm0,%xmm4
movdqa 96(%esp),%xmm0
addl %ebp,%ebx
andl %edx,%esi
pxor %xmm7,%xmm4
pshufd $238,%xmm1,%xmm5
xorl %edi,%edx
addl %ecx,%ebx
rorl $7,%ecx
xorl %edi,%esi
movl %ebx,%ebp
punpcklqdq %xmm2,%xmm5
movdqa %xmm4,%xmm7
addl 16(%esp),%eax
xorl %edx,%ecx
paddd %xmm4,%xmm0
movdqa %xmm1,80(%esp)
roll $5,%ebx
addl %esi,%eax
psrldq $4,%xmm7
andl %ecx,%ebp
xorl %edx,%ecx
pxor %xmm1,%xmm5
addl %ebx,%eax
rorl $7,%ebx
pxor %xmm3,%xmm7
xorl %edx,%ebp
movl %eax,%esi
addl 20(%esp),%edi
pxor %xmm7,%xmm5
xorl %ecx,%ebx
roll $5,%eax
movdqa %xmm0,(%esp)
addl %ebp,%edi
andl %ebx,%esi
movdqa %xmm5,%xmm1
xorl %ecx,%ebx
addl %eax,%edi
rorl $7,%eax
movdqa %xmm5,%xmm7
xorl %ecx,%esi
pslldq $12,%xmm1
paddd %xmm5,%xmm5
movl %edi,%ebp
addl 24(%esp),%edx
psrld $31,%xmm7
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm1,%xmm0
addl %esi,%edx
andl %eax,%ebp
xorl %ebx,%eax
psrld $30,%xmm1
addl %edi,%edx
rorl $7,%edi
por %xmm7,%xmm5
xorl %ebx,%ebp
movl %edx,%esi
addl 28(%esp),%ecx
pslld $2,%xmm0
xorl %eax,%edi
roll $5,%edx
pxor %xmm1,%xmm5
movdqa 112(%esp),%xmm1
addl %ebp,%ecx
andl %edi,%esi
pxor %xmm0,%xmm5
pshufd $238,%xmm2,%xmm6
xorl %eax,%edi
addl %edx,%ecx
rorl $7,%edx
xorl %eax,%esi
movl %ecx,%ebp
punpcklqdq %xmm3,%xmm6
movdqa %xmm5,%xmm0
addl 32(%esp),%ebx
xorl %edi,%edx
paddd %xmm5,%xmm1
movdqa %xmm2,96(%esp)
roll $5,%ecx
addl %esi,%ebx
psrldq $4,%xmm0
andl %edx,%ebp
xorl %edi,%edx
pxor %xmm2,%xmm6
addl %ecx,%ebx
rorl $7,%ecx
pxor %xmm4,%xmm0
xorl %edi,%ebp
movl %ebx,%esi
addl 36(%esp),%eax
pxor %xmm0,%xmm6
xorl %edx,%ecx
roll $5,%ebx
movdqa %xmm1,16(%esp)
addl %ebp,%eax
andl %ecx,%esi
movdqa %xmm6,%xmm2
xorl %edx,%ecx
addl %ebx,%eax
rorl $7,%ebx
movdqa %xmm6,%xmm0
xorl %edx,%esi
pslldq $12,%xmm2
paddd %xmm6,%xmm6
movl %eax,%ebp
addl 40(%esp),%edi
psrld $31,%xmm0
xorl %ecx,%ebx
roll $5,%eax
movdqa %xmm2,%xmm1
addl %esi,%edi
andl %ebx,%ebp
xorl %ecx,%ebx
psrld $30,%xmm2
addl %eax,%edi
rorl $7,%eax
por %xmm0,%xmm6
xorl %ecx,%ebp
movdqa 64(%esp),%xmm0
movl %edi,%esi
addl 44(%esp),%edx
pslld $2,%xmm1
xorl %ebx,%eax
roll $5,%edi
pxor %xmm2,%xmm6
movdqa 112(%esp),%xmm2
addl %ebp,%edx
andl %eax,%esi
pxor %xmm1,%xmm6
pshufd $238,%xmm3,%xmm7
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
xorl %ebx,%esi
movl %edx,%ebp
punpcklqdq %xmm4,%xmm7
movdqa %xmm6,%xmm1
addl 48(%esp),%ecx
xorl %eax,%edi
paddd %xmm6,%xmm2
movdqa %xmm3,64(%esp)
roll $5,%edx
addl %esi,%ecx
psrldq $4,%xmm1
andl %edi,%ebp
xorl %eax,%edi
pxor %xmm3,%xmm7
addl %edx,%ecx
rorl $7,%edx
pxor %xmm5,%xmm1
xorl %eax,%ebp
movl %ecx,%esi
addl 52(%esp),%ebx
pxor %xmm1,%xmm7
xorl %edi,%edx
roll $5,%ecx
movdqa %xmm2,32(%esp)
addl %ebp,%ebx
andl %edx,%esi
movdqa %xmm7,%xmm3
xorl %edi,%edx
addl %ecx,%ebx
rorl $7,%ecx
movdqa %xmm7,%xmm1
xorl %edi,%esi
pslldq $12,%xmm3
paddd %xmm7,%xmm7
movl %ebx,%ebp
addl 56(%esp),%eax
psrld $31,%xmm1
xorl %edx,%ecx
roll $5,%ebx
movdqa %xmm3,%xmm2
addl %esi,%eax
andl %ecx,%ebp
xorl %edx,%ecx
psrld $30,%xmm3
addl %ebx,%eax
rorl $7,%ebx
por %xmm1,%xmm7
xorl %edx,%ebp
movdqa 80(%esp),%xmm1
movl %eax,%esi
addl 60(%esp),%edi
pslld $2,%xmm2
xorl %ecx,%ebx
roll $5,%eax
pxor %xmm3,%xmm7
movdqa 112(%esp),%xmm3
addl %ebp,%edi
andl %ebx,%esi
pxor %xmm2,%xmm7
pshufd $238,%xmm6,%xmm2
xorl %ecx,%ebx
addl %eax,%edi
rorl $7,%eax
pxor %xmm4,%xmm0
punpcklqdq %xmm7,%xmm2
xorl %ecx,%esi
movl %edi,%ebp
addl (%esp),%edx
pxor %xmm1,%xmm0
movdqa %xmm4,80(%esp)
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm3,%xmm4
addl %esi,%edx
paddd %xmm7,%xmm3
andl %eax,%ebp
pxor %xmm2,%xmm0
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
xorl %ebx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
movl %edx,%esi
addl 4(%esp),%ecx
xorl %eax,%edi
roll $5,%edx
pslld $2,%xmm0
addl %ebp,%ecx
andl %edi,%esi
psrld $30,%xmm2
xorl %eax,%edi
addl %edx,%ecx
rorl $7,%edx
xorl %eax,%esi
movl %ecx,%ebp
addl 8(%esp),%ebx
xorl %edi,%edx
roll $5,%ecx
por %xmm2,%xmm0
addl %esi,%ebx
andl %edx,%ebp
movdqa 96(%esp),%xmm2
xorl %edi,%edx
addl %ecx,%ebx
addl 12(%esp),%eax
xorl %edi,%ebp
movl %ebx,%esi
pshufd $238,%xmm7,%xmm3
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
addl 16(%esp),%edi
pxor %xmm5,%xmm1
punpcklqdq %xmm0,%xmm3
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm2,%xmm1
movdqa %xmm5,96(%esp)
addl %esi,%edi
xorl %ecx,%ebp
movdqa %xmm4,%xmm5
rorl $7,%ebx
paddd %xmm0,%xmm4
addl %eax,%edi
pxor %xmm3,%xmm1
addl 20(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
pslld $2,%xmm1
addl 24(%esp),%ecx
xorl %eax,%esi
psrld $30,%xmm3
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
por %xmm3,%xmm1
addl 28(%esp),%ebx
xorl %edi,%ebp
movdqa 64(%esp),%xmm3
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
pshufd $238,%xmm0,%xmm4
addl %ecx,%ebx
addl 32(%esp),%eax
pxor %xmm6,%xmm2
punpcklqdq %xmm1,%xmm4
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
pxor %xmm3,%xmm2
movdqa %xmm6,64(%esp)
addl %esi,%eax
xorl %edx,%ebp
movdqa 128(%esp),%xmm6
rorl $7,%ecx
paddd %xmm1,%xmm5
addl %ebx,%eax
pxor %xmm4,%xmm2
addl 36(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
pslld $2,%xmm2
addl 40(%esp),%edx
xorl %ebx,%esi
psrld $30,%xmm4
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
por %xmm4,%xmm2
addl 44(%esp),%ecx
xorl %eax,%ebp
movdqa 80(%esp),%xmm4
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
pshufd $238,%xmm1,%xmm5
addl %edx,%ecx
addl 48(%esp),%ebx
pxor %xmm7,%xmm3
punpcklqdq %xmm2,%xmm5
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
pxor %xmm4,%xmm3
movdqa %xmm7,80(%esp)
addl %esi,%ebx
xorl %edi,%ebp
movdqa %xmm6,%xmm7
rorl $7,%edx
paddd %xmm2,%xmm6
addl %ecx,%ebx
pxor %xmm5,%xmm3
addl 52(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
pslld $2,%xmm3
addl 56(%esp),%edi
xorl %ecx,%esi
psrld $30,%xmm5
movl %eax,%ebp
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
por %xmm5,%xmm3
addl 60(%esp),%edx
xorl %ebx,%ebp
movdqa 96(%esp),%xmm5
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
pshufd $238,%xmm2,%xmm6
addl %edi,%edx
addl (%esp),%ecx
pxor %xmm0,%xmm4
punpcklqdq %xmm3,%xmm6
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
pxor %xmm5,%xmm4
movdqa %xmm0,96(%esp)
addl %esi,%ecx
xorl %eax,%ebp
movdqa %xmm7,%xmm0
rorl $7,%edi
paddd %xmm3,%xmm7
addl %edx,%ecx
pxor %xmm6,%xmm4
addl 4(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
movdqa %xmm4,%xmm6
movdqa %xmm7,48(%esp)
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
pslld $2,%xmm4
addl 8(%esp),%eax
xorl %edx,%esi
psrld $30,%xmm6
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
por %xmm6,%xmm4
addl 12(%esp),%edi
xorl %ecx,%ebp
movdqa 64(%esp),%xmm6
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
pshufd $238,%xmm3,%xmm7
addl %eax,%edi
addl 16(%esp),%edx
pxor %xmm1,%xmm5
punpcklqdq %xmm4,%xmm7
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
pxor %xmm6,%xmm5
movdqa %xmm1,64(%esp)
addl %esi,%edx
xorl %ebx,%ebp
movdqa %xmm0,%xmm1
rorl $7,%eax
paddd %xmm4,%xmm0
addl %edi,%edx
pxor %xmm7,%xmm5
addl 20(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
movdqa %xmm5,%xmm7
movdqa %xmm0,(%esp)
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
pslld $2,%xmm5
addl 24(%esp),%ebx
xorl %edi,%esi
psrld $30,%xmm7
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
por %xmm7,%xmm5
addl 28(%esp),%eax
movdqa 80(%esp),%xmm7
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
pshufd $238,%xmm4,%xmm0
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
addl 32(%esp),%edi
pxor %xmm2,%xmm6
punpcklqdq %xmm5,%xmm0
andl %ecx,%esi
xorl %edx,%ecx
rorl $7,%ebx
pxor %xmm7,%xmm6
movdqa %xmm2,80(%esp)
movl %eax,%ebp
xorl %ecx,%esi
roll $5,%eax
movdqa %xmm1,%xmm2
addl %esi,%edi
paddd %xmm5,%xmm1
xorl %ebx,%ebp
pxor %xmm0,%xmm6
xorl %ecx,%ebx
addl %eax,%edi
addl 36(%esp),%edx
andl %ebx,%ebp
movdqa %xmm6,%xmm0
movdqa %xmm1,16(%esp)
xorl %ecx,%ebx
rorl $7,%eax
movl %edi,%esi
xorl %ebx,%ebp
roll $5,%edi
pslld $2,%xmm6
addl %ebp,%edx
xorl %eax,%esi
psrld $30,%xmm0
xorl %ebx,%eax
addl %edi,%edx
addl 40(%esp),%ecx
andl %eax,%esi
xorl %ebx,%eax
rorl $7,%edi
por %xmm0,%xmm6
movl %edx,%ebp
xorl %eax,%esi
movdqa 96(%esp),%xmm0
roll $5,%edx
addl %esi,%ecx
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
pshufd $238,%xmm5,%xmm1
addl 44(%esp),%ebx
andl %edi,%ebp
xorl %eax,%edi
rorl $7,%edx
movl %ecx,%esi
xorl %edi,%ebp
roll $5,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
addl 48(%esp),%eax
pxor %xmm3,%xmm7
punpcklqdq %xmm6,%xmm1
andl %edx,%esi
xorl %edi,%edx
rorl $7,%ecx
pxor %xmm0,%xmm7
movdqa %xmm3,96(%esp)
movl %ebx,%ebp
xorl %edx,%esi
roll $5,%ebx
movdqa 144(%esp),%xmm3
addl %esi,%eax
paddd %xmm6,%xmm2
xorl %ecx,%ebp
pxor %xmm1,%xmm7
xorl %edx,%ecx
addl %ebx,%eax
addl 52(%esp),%edi
andl %ecx,%ebp
movdqa %xmm7,%xmm1
movdqa %xmm2,32(%esp)
xorl %edx,%ecx
rorl $7,%ebx
movl %eax,%esi
xorl %ecx,%ebp
roll $5,%eax
pslld $2,%xmm7
addl %ebp,%edi
xorl %ebx,%esi
psrld $30,%xmm1
xorl %ecx,%ebx
addl %eax,%edi
addl 56(%esp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
rorl $7,%eax
por %xmm1,%xmm7
movl %edi,%ebp
xorl %ebx,%esi
movdqa 64(%esp),%xmm1
roll $5,%edi
addl %esi,%edx
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
pshufd $238,%xmm6,%xmm2
addl 60(%esp),%ecx
andl %eax,%ebp
xorl %ebx,%eax
rorl $7,%edi
movl %edx,%esi
xorl %eax,%ebp
roll $5,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
addl (%esp),%ebx
pxor %xmm4,%xmm0
punpcklqdq %xmm7,%xmm2
andl %edi,%esi
xorl %eax,%edi
rorl $7,%edx
pxor %xmm1,%xmm0
movdqa %xmm4,64(%esp)
movl %ecx,%ebp
xorl %edi,%esi
roll $5,%ecx
movdqa %xmm3,%xmm4
addl %esi,%ebx
paddd %xmm7,%xmm3
xorl %edx,%ebp
pxor %xmm2,%xmm0
xorl %edi,%edx
addl %ecx,%ebx
addl 4(%esp),%eax
andl %edx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
xorl %edi,%edx
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
pslld $2,%xmm0
addl %ebp,%eax
xorl %ecx,%esi
psrld $30,%xmm2
xorl %edx,%ecx
addl %ebx,%eax
addl 8(%esp),%edi
andl %ecx,%esi
xorl %edx,%ecx
rorl $7,%ebx
por %xmm2,%xmm0
movl %eax,%ebp
xorl %ecx,%esi
movdqa 80(%esp),%xmm2
roll $5,%eax
addl %esi,%edi
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
pshufd $238,%xmm7,%xmm3
addl 12(%esp),%edx
andl %ebx,%ebp
xorl %ecx,%ebx
rorl $7,%eax
movl %edi,%esi
xorl %ebx,%ebp
roll $5,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
addl 16(%esp),%ecx
pxor %xmm5,%xmm1
punpcklqdq %xmm0,%xmm3
andl %eax,%esi
xorl %ebx,%eax
rorl $7,%edi
pxor %xmm2,%xmm1
movdqa %xmm5,80(%esp)
movl %edx,%ebp
xorl %eax,%esi
roll $5,%edx
movdqa %xmm4,%xmm5
addl %esi,%ecx
paddd %xmm0,%xmm4
xorl %edi,%ebp
pxor %xmm3,%xmm1
xorl %eax,%edi
addl %edx,%ecx
addl 20(%esp),%ebx
andl %edi,%ebp
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
xorl %eax,%edi
rorl $7,%edx
movl %ecx,%esi
xorl %edi,%ebp
roll $5,%ecx
pslld $2,%xmm1
addl %ebp,%ebx
xorl %edx,%esi
psrld $30,%xmm3
xorl %edi,%edx
addl %ecx,%ebx
addl 24(%esp),%eax
andl %edx,%esi
xorl %edi,%edx
rorl $7,%ecx
por %xmm3,%xmm1
movl %ebx,%ebp
xorl %edx,%esi
movdqa 96(%esp),%xmm3
roll $5,%ebx
addl %esi,%eax
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
pshufd $238,%xmm0,%xmm4
addl 28(%esp),%edi
andl %ecx,%ebp
xorl %edx,%ecx
rorl $7,%ebx
movl %eax,%esi
xorl %ecx,%ebp
roll $5,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
addl 32(%esp),%edx
pxor %xmm6,%xmm2
punpcklqdq %xmm1,%xmm4
andl %ebx,%esi
xorl %ecx,%ebx
rorl $7,%eax
pxor %xmm3,%xmm2
movdqa %xmm6,96(%esp)
movl %edi,%ebp
xorl %ebx,%esi
roll $5,%edi
movdqa %xmm5,%xmm6
addl %esi,%edx
paddd %xmm1,%xmm5
xorl %eax,%ebp
pxor %xmm4,%xmm2
xorl %ebx,%eax
addl %edi,%edx
addl 36(%esp),%ecx
andl %eax,%ebp
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
xorl %ebx,%eax
rorl $7,%edi
movl %edx,%esi
xorl %eax,%ebp
roll $5,%edx
pslld $2,%xmm2
addl %ebp,%ecx
xorl %edi,%esi
psrld $30,%xmm4
xorl %eax,%edi
addl %edx,%ecx
addl 40(%esp),%ebx
andl %edi,%esi
xorl %eax,%edi
rorl $7,%edx
por %xmm4,%xmm2
movl %ecx,%ebp
xorl %edi,%esi
movdqa 64(%esp),%xmm4
roll $5,%ecx
addl %esi,%ebx
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
pshufd $238,%xmm1,%xmm5
addl 44(%esp),%eax
andl %edx,%ebp
xorl %edi,%edx
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
addl %ebx,%eax
addl 48(%esp),%edi
pxor %xmm7,%xmm3
punpcklqdq %xmm2,%xmm5
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm4,%xmm3
movdqa %xmm7,64(%esp)
addl %esi,%edi
xorl %ecx,%ebp
movdqa %xmm6,%xmm7
rorl $7,%ebx
paddd %xmm2,%xmm6
addl %eax,%edi
pxor %xmm5,%xmm3
addl 52(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
pslld $2,%xmm3
addl 56(%esp),%ecx
xorl %eax,%esi
psrld $30,%xmm5
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
por %xmm5,%xmm3
addl 60(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
addl (%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
paddd %xmm3,%xmm7
addl %ebx,%eax
addl 4(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
movdqa %xmm7,48(%esp)
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 8(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
addl 12(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
movl 196(%esp),%ebp
cmpl 200(%esp),%ebp
je L003done
movdqa 160(%esp),%xmm7
movdqa 176(%esp),%xmm6
movdqu (%ebp),%xmm0
movdqu 16(%ebp),%xmm1
movdqu 32(%ebp),%xmm2
movdqu 48(%ebp),%xmm3
addl $64,%ebp
.byte 102,15,56,0,198
movl %ebp,196(%esp)
movdqa %xmm7,96(%esp)
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
.byte 102,15,56,0,206
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
paddd %xmm7,%xmm0
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
movdqa %xmm0,(%esp)
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
psubd %xmm7,%xmm0
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
.byte 102,15,56,0,214
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
paddd %xmm7,%xmm1
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
movdqa %xmm1,16(%esp)
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
psubd %xmm7,%xmm1
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
.byte 102,15,56,0,222
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
paddd %xmm7,%xmm2
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
movdqa %xmm2,32(%esp)
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
psubd %xmm7,%xmm2
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
rorl $7,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %ecx,%ebx
movl %edx,12(%ebp)
xorl %edx,%ebx
movl %edi,16(%ebp)
movl %esi,%ebp
pshufd $238,%xmm0,%xmm4
andl %ebx,%esi
movl %ebp,%ebx
jmp L002loop
.align 4,0x90
L003done:
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
rorl $7,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
movl 204(%esp),%esp
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
movl %edi,16(%ebp)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _sha1_block_data_order_avx
.private_extern _sha1_block_data_order_avx
.align 4
_sha1_block_data_order_avx:
L_sha1_block_data_order_avx_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call L004pic_point
L004pic_point:
popl %ebp
leal LK_XX_XX-L004pic_point(%ebp),%ebp
vzeroall
vmovdqa (%ebp),%xmm7
vmovdqa 16(%ebp),%xmm0
vmovdqa 32(%ebp),%xmm1
vmovdqa 48(%ebp),%xmm2
vmovdqa 64(%ebp),%xmm6
movl 20(%esp),%edi
movl 24(%esp),%ebp
movl 28(%esp),%edx
movl %esp,%esi
subl $208,%esp
andl $-64,%esp
vmovdqa %xmm0,112(%esp)
vmovdqa %xmm1,128(%esp)
vmovdqa %xmm2,144(%esp)
shll $6,%edx
vmovdqa %xmm7,160(%esp)
addl %ebp,%edx
vmovdqa %xmm6,176(%esp)
addl $64,%ebp
movl %edi,192(%esp)
movl %ebp,196(%esp)
movl %edx,200(%esp)
movl %esi,204(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
movl 16(%edi),%edi
movl %ebx,%esi
vmovdqu -64(%ebp),%xmm0
vmovdqu -48(%ebp),%xmm1
vmovdqu -32(%ebp),%xmm2
vmovdqu -16(%ebp),%xmm3
vpshufb %xmm6,%xmm0,%xmm0
vpshufb %xmm6,%xmm1,%xmm1
vpshufb %xmm6,%xmm2,%xmm2
vmovdqa %xmm7,96(%esp)
vpshufb %xmm6,%xmm3,%xmm3
vpaddd %xmm7,%xmm0,%xmm4
vpaddd %xmm7,%xmm1,%xmm5
vpaddd %xmm7,%xmm2,%xmm6
vmovdqa %xmm4,(%esp)
movl %ecx,%ebp
vmovdqa %xmm5,16(%esp)
xorl %edx,%ebp
vmovdqa %xmm6,32(%esp)
andl %ebp,%esi
jmp L005loop
.align 4,0x90
L005loop:
shrdl $2,%ebx,%ebx
xorl %edx,%esi
vpalignr $8,%xmm0,%xmm1,%xmm4
movl %eax,%ebp
addl (%esp),%edi
vpaddd %xmm3,%xmm7,%xmm7
vmovdqa %xmm0,64(%esp)
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpsrldq $4,%xmm3,%xmm6
addl %esi,%edi
andl %ebx,%ebp
vpxor %xmm0,%xmm4,%xmm4
xorl %ecx,%ebx
addl %eax,%edi
vpxor %xmm2,%xmm6,%xmm6
shrdl $7,%eax,%eax
xorl %ecx,%ebp
vmovdqa %xmm7,48(%esp)
movl %edi,%esi
addl 4(%esp),%edx
vpxor %xmm6,%xmm4,%xmm4
xorl %ebx,%eax
shldl $5,%edi,%edi
addl %ebp,%edx
andl %eax,%esi
vpsrld $31,%xmm4,%xmm6
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%esi
vpslldq $12,%xmm4,%xmm0
vpaddd %xmm4,%xmm4,%xmm4
movl %edx,%ebp
addl 8(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpsrld $30,%xmm0,%xmm7
vpor %xmm6,%xmm4,%xmm4
addl %esi,%ecx
andl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
vpslld $2,%xmm0,%xmm0
shrdl $7,%edx,%edx
xorl %eax,%ebp
vpxor %xmm7,%xmm4,%xmm4
movl %ecx,%esi
addl 12(%esp),%ebx
xorl %edi,%edx
shldl $5,%ecx,%ecx
vpxor %xmm0,%xmm4,%xmm4
addl %ebp,%ebx
andl %edx,%esi
vmovdqa 96(%esp),%xmm0
xorl %edi,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %edi,%esi
vpalignr $8,%xmm1,%xmm2,%xmm5
movl %ebx,%ebp
addl 16(%esp),%eax
vpaddd %xmm4,%xmm0,%xmm0
vmovdqa %xmm1,80(%esp)
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpsrldq $4,%xmm4,%xmm7
addl %esi,%eax
andl %ecx,%ebp
vpxor %xmm1,%xmm5,%xmm5
xorl %edx,%ecx
addl %ebx,%eax
vpxor %xmm3,%xmm7,%xmm7
shrdl $7,%ebx,%ebx
xorl %edx,%ebp
vmovdqa %xmm0,(%esp)
movl %eax,%esi
addl 20(%esp),%edi
vpxor %xmm7,%xmm5,%xmm5
xorl %ecx,%ebx
shldl $5,%eax,%eax
addl %ebp,%edi
andl %ebx,%esi
vpsrld $31,%xmm5,%xmm7
xorl %ecx,%ebx
addl %eax,%edi
shrdl $7,%eax,%eax
xorl %ecx,%esi
vpslldq $12,%xmm5,%xmm1
vpaddd %xmm5,%xmm5,%xmm5
movl %edi,%ebp
addl 24(%esp),%edx
xorl %ebx,%eax
shldl $5,%edi,%edi
vpsrld $30,%xmm1,%xmm0
vpor %xmm7,%xmm5,%xmm5
addl %esi,%edx
andl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
vpslld $2,%xmm1,%xmm1
shrdl $7,%edi,%edi
xorl %ebx,%ebp
vpxor %xmm0,%xmm5,%xmm5
movl %edx,%esi
addl 28(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpxor %xmm1,%xmm5,%xmm5
addl %ebp,%ecx
andl %edi,%esi
vmovdqa 112(%esp),%xmm1
xorl %eax,%edi
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
vpalignr $8,%xmm2,%xmm3,%xmm6
movl %ecx,%ebp
addl 32(%esp),%ebx
vpaddd %xmm5,%xmm1,%xmm1
vmovdqa %xmm2,96(%esp)
xorl %edi,%edx
shldl $5,%ecx,%ecx
vpsrldq $4,%xmm5,%xmm0
addl %esi,%ebx
andl %edx,%ebp
vpxor %xmm2,%xmm6,%xmm6
xorl %edi,%edx
addl %ecx,%ebx
vpxor %xmm4,%xmm0,%xmm0
shrdl $7,%ecx,%ecx
xorl %edi,%ebp
vmovdqa %xmm1,16(%esp)
movl %ebx,%esi
addl 36(%esp),%eax
vpxor %xmm0,%xmm6,%xmm6
xorl %edx,%ecx
shldl $5,%ebx,%ebx
addl %ebp,%eax
andl %ecx,%esi
vpsrld $31,%xmm6,%xmm0
xorl %edx,%ecx
addl %ebx,%eax
shrdl $7,%ebx,%ebx
xorl %edx,%esi
vpslldq $12,%xmm6,%xmm2
vpaddd %xmm6,%xmm6,%xmm6
movl %eax,%ebp
addl 40(%esp),%edi
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpsrld $30,%xmm2,%xmm1
vpor %xmm0,%xmm6,%xmm6
addl %esi,%edi
andl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
vpslld $2,%xmm2,%xmm2
vmovdqa 64(%esp),%xmm0
shrdl $7,%eax,%eax
xorl %ecx,%ebp
vpxor %xmm1,%xmm6,%xmm6
movl %edi,%esi
addl 44(%esp),%edx
xorl %ebx,%eax
shldl $5,%edi,%edi
vpxor %xmm2,%xmm6,%xmm6
addl %ebp,%edx
andl %eax,%esi
vmovdqa 112(%esp),%xmm2
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%esi
vpalignr $8,%xmm3,%xmm4,%xmm7
movl %edx,%ebp
addl 48(%esp),%ecx
vpaddd %xmm6,%xmm2,%xmm2
vmovdqa %xmm3,64(%esp)
xorl %eax,%edi
shldl $5,%edx,%edx
vpsrldq $4,%xmm6,%xmm1
addl %esi,%ecx
andl %edi,%ebp
vpxor %xmm3,%xmm7,%xmm7
xorl %eax,%edi
addl %edx,%ecx
vpxor %xmm5,%xmm1,%xmm1
shrdl $7,%edx,%edx
xorl %eax,%ebp
vmovdqa %xmm2,32(%esp)
movl %ecx,%esi
addl 52(%esp),%ebx
vpxor %xmm1,%xmm7,%xmm7
xorl %edi,%edx
shldl $5,%ecx,%ecx
addl %ebp,%ebx
andl %edx,%esi
vpsrld $31,%xmm7,%xmm1
xorl %edi,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %edi,%esi
vpslldq $12,%xmm7,%xmm3
vpaddd %xmm7,%xmm7,%xmm7
movl %ebx,%ebp
addl 56(%esp),%eax
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpsrld $30,%xmm3,%xmm2
vpor %xmm1,%xmm7,%xmm7
addl %esi,%eax
andl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
vpslld $2,%xmm3,%xmm3
vmovdqa 80(%esp),%xmm1
shrdl $7,%ebx,%ebx
xorl %edx,%ebp
vpxor %xmm2,%xmm7,%xmm7
movl %eax,%esi
addl 60(%esp),%edi
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpxor %xmm3,%xmm7,%xmm7
addl %ebp,%edi
andl %ebx,%esi
vmovdqa 112(%esp),%xmm3
xorl %ecx,%ebx
addl %eax,%edi
vpalignr $8,%xmm6,%xmm7,%xmm2
vpxor %xmm4,%xmm0,%xmm0
shrdl $7,%eax,%eax
xorl %ecx,%esi
movl %edi,%ebp
addl (%esp),%edx
vpxor %xmm1,%xmm0,%xmm0
vmovdqa %xmm4,80(%esp)
xorl %ebx,%eax
shldl $5,%edi,%edi
vmovdqa %xmm3,%xmm4
vpaddd %xmm7,%xmm3,%xmm3
addl %esi,%edx
andl %eax,%ebp
vpxor %xmm2,%xmm0,%xmm0
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%ebp
vpsrld $30,%xmm0,%xmm2
vmovdqa %xmm3,48(%esp)
movl %edx,%esi
addl 4(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpslld $2,%xmm0,%xmm0
addl %ebp,%ecx
andl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
movl %ecx,%ebp
addl 8(%esp),%ebx
vpor %xmm2,%xmm0,%xmm0
xorl %edi,%edx
shldl $5,%ecx,%ecx
vmovdqa 96(%esp),%xmm2
addl %esi,%ebx
andl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 12(%esp),%eax
xorl %edi,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpalignr $8,%xmm7,%xmm0,%xmm3
vpxor %xmm5,%xmm1,%xmm1
addl 16(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm5,96(%esp)
addl %esi,%edi
xorl %ecx,%ebp
vmovdqa %xmm4,%xmm5
vpaddd %xmm0,%xmm4,%xmm4
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpxor %xmm3,%xmm1,%xmm1
addl 20(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
vpsrld $30,%xmm1,%xmm3
vmovdqa %xmm4,(%esp)
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpslld $2,%xmm1,%xmm1
addl 24(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vpor %xmm3,%xmm1,%xmm1
addl 28(%esp),%ebx
xorl %edi,%ebp
vmovdqa 64(%esp),%xmm3
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpalignr $8,%xmm0,%xmm1,%xmm4
vpxor %xmm6,%xmm2,%xmm2
addl 32(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
vpxor %xmm3,%xmm2,%xmm2
vmovdqa %xmm6,64(%esp)
addl %esi,%eax
xorl %edx,%ebp
vmovdqa 128(%esp),%xmm6
vpaddd %xmm1,%xmm5,%xmm5
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpxor %xmm4,%xmm2,%xmm2
addl 36(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
vpsrld $30,%xmm2,%xmm4
vmovdqa %xmm5,16(%esp)
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpslld $2,%xmm2,%xmm2
addl 40(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
vpor %xmm4,%xmm2,%xmm2
addl 44(%esp),%ecx
xorl %eax,%ebp
vmovdqa 80(%esp),%xmm4
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
vpalignr $8,%xmm1,%xmm2,%xmm5
vpxor %xmm7,%xmm3,%xmm3
addl 48(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
vpxor %xmm4,%xmm3,%xmm3
vmovdqa %xmm7,80(%esp)
addl %esi,%ebx
xorl %edi,%ebp
vmovdqa %xmm6,%xmm7
vpaddd %xmm2,%xmm6,%xmm6
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpxor %xmm5,%xmm3,%xmm3
addl 52(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
vpsrld $30,%xmm3,%xmm5
vmovdqa %xmm6,32(%esp)
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpslld $2,%xmm3,%xmm3
addl 56(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpor %xmm5,%xmm3,%xmm3
addl 60(%esp),%edx
xorl %ebx,%ebp
vmovdqa 96(%esp),%xmm5
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpalignr $8,%xmm2,%xmm3,%xmm6
vpxor %xmm0,%xmm4,%xmm4
addl (%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
vpxor %xmm5,%xmm4,%xmm4
vmovdqa %xmm0,96(%esp)
addl %esi,%ecx
xorl %eax,%ebp
vmovdqa %xmm7,%xmm0
vpaddd %xmm3,%xmm7,%xmm7
shrdl $7,%edi,%edi
addl %edx,%ecx
vpxor %xmm6,%xmm4,%xmm4
addl 4(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
vpsrld $30,%xmm4,%xmm6
vmovdqa %xmm7,48(%esp)
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpslld $2,%xmm4,%xmm4
addl 8(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpor %xmm6,%xmm4,%xmm4
addl 12(%esp),%edi
xorl %ecx,%ebp
vmovdqa 64(%esp),%xmm6
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpalignr $8,%xmm3,%xmm4,%xmm7
vpxor %xmm1,%xmm5,%xmm5
addl 16(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
vpxor %xmm6,%xmm5,%xmm5
vmovdqa %xmm1,64(%esp)
addl %esi,%edx
xorl %ebx,%ebp
vmovdqa %xmm0,%xmm1
vpaddd %xmm4,%xmm0,%xmm0
shrdl $7,%eax,%eax
addl %edi,%edx
vpxor %xmm7,%xmm5,%xmm5
addl 20(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
vpsrld $30,%xmm5,%xmm7
vmovdqa %xmm0,(%esp)
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
vpslld $2,%xmm5,%xmm5
addl 24(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpor %xmm7,%xmm5,%xmm5
addl 28(%esp),%eax
vmovdqa 80(%esp),%xmm7
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
vpalignr $8,%xmm4,%xmm5,%xmm0
vpxor %xmm2,%xmm6,%xmm6
addl 32(%esp),%edi
andl %ecx,%esi
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
vpxor %xmm7,%xmm6,%xmm6
vmovdqa %xmm2,80(%esp)
movl %eax,%ebp
xorl %ecx,%esi
vmovdqa %xmm1,%xmm2
vpaddd %xmm5,%xmm1,%xmm1
shldl $5,%eax,%eax
addl %esi,%edi
vpxor %xmm0,%xmm6,%xmm6
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
addl 36(%esp),%edx
vpsrld $30,%xmm6,%xmm0
vmovdqa %xmm1,16(%esp)
andl %ebx,%ebp
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %edi,%esi
vpslld $2,%xmm6,%xmm6
xorl %ebx,%ebp
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
addl 40(%esp),%ecx
andl %eax,%esi
vpor %xmm0,%xmm6,%xmm6
xorl %ebx,%eax
shrdl $7,%edi,%edi
vmovdqa 96(%esp),%xmm0
movl %edx,%ebp
xorl %eax,%esi
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
addl 44(%esp),%ebx
andl %edi,%ebp
xorl %eax,%edi
shrdl $7,%edx,%edx
movl %ecx,%esi
xorl %edi,%ebp
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
vpalignr $8,%xmm5,%xmm6,%xmm1
vpxor %xmm3,%xmm7,%xmm7
addl 48(%esp),%eax
andl %edx,%esi
xorl %edi,%edx
shrdl $7,%ecx,%ecx
vpxor %xmm0,%xmm7,%xmm7
vmovdqa %xmm3,96(%esp)
movl %ebx,%ebp
xorl %edx,%esi
vmovdqa 144(%esp),%xmm3
vpaddd %xmm6,%xmm2,%xmm2
shldl $5,%ebx,%ebx
addl %esi,%eax
vpxor %xmm1,%xmm7,%xmm7
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
addl 52(%esp),%edi
vpsrld $30,%xmm7,%xmm1
vmovdqa %xmm2,32(%esp)
andl %ecx,%ebp
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
vpslld $2,%xmm7,%xmm7
xorl %ecx,%ebp
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
addl 56(%esp),%edx
andl %ebx,%esi
vpor %xmm1,%xmm7,%xmm7
xorl %ecx,%ebx
shrdl $7,%eax,%eax
vmovdqa 64(%esp),%xmm1
movl %edi,%ebp
xorl %ebx,%esi
shldl $5,%edi,%edi
addl %esi,%edx
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
addl 60(%esp),%ecx
andl %eax,%ebp
xorl %ebx,%eax
shrdl $7,%edi,%edi
movl %edx,%esi
xorl %eax,%ebp
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
vpalignr $8,%xmm6,%xmm7,%xmm2
vpxor %xmm4,%xmm0,%xmm0
addl (%esp),%ebx
andl %edi,%esi
xorl %eax,%edi
shrdl $7,%edx,%edx
vpxor %xmm1,%xmm0,%xmm0
vmovdqa %xmm4,64(%esp)
movl %ecx,%ebp
xorl %edi,%esi
vmovdqa %xmm3,%xmm4
vpaddd %xmm7,%xmm3,%xmm3
shldl $5,%ecx,%ecx
addl %esi,%ebx
vpxor %xmm2,%xmm0,%xmm0
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 4(%esp),%eax
vpsrld $30,%xmm0,%xmm2
vmovdqa %xmm3,48(%esp)
andl %edx,%ebp
xorl %edi,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
vpslld $2,%xmm0,%xmm0
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
addl 8(%esp),%edi
andl %ecx,%esi
vpor %xmm2,%xmm0,%xmm0
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
vmovdqa 80(%esp),%xmm2
movl %eax,%ebp
xorl %ecx,%esi
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
addl 12(%esp),%edx
andl %ebx,%ebp
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %edi,%esi
xorl %ebx,%ebp
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
vpalignr $8,%xmm7,%xmm0,%xmm3
vpxor %xmm5,%xmm1,%xmm1
addl 16(%esp),%ecx
andl %eax,%esi
xorl %ebx,%eax
shrdl $7,%edi,%edi
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm5,80(%esp)
movl %edx,%ebp
xorl %eax,%esi
vmovdqa %xmm4,%xmm5
vpaddd %xmm0,%xmm4,%xmm4
shldl $5,%edx,%edx
addl %esi,%ecx
vpxor %xmm3,%xmm1,%xmm1
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
addl 20(%esp),%ebx
vpsrld $30,%xmm1,%xmm3
vmovdqa %xmm4,(%esp)
andl %edi,%ebp
xorl %eax,%edi
shrdl $7,%edx,%edx
movl %ecx,%esi
vpslld $2,%xmm1,%xmm1
xorl %edi,%ebp
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
addl 24(%esp),%eax
andl %edx,%esi
vpor %xmm3,%xmm1,%xmm1
xorl %edi,%edx
shrdl $7,%ecx,%ecx
vmovdqa 96(%esp),%xmm3
movl %ebx,%ebp
xorl %edx,%esi
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
addl 28(%esp),%edi
andl %ecx,%ebp
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
xorl %ecx,%ebp
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
vpalignr $8,%xmm0,%xmm1,%xmm4
vpxor %xmm6,%xmm2,%xmm2
addl 32(%esp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
shrdl $7,%eax,%eax
vpxor %xmm3,%xmm2,%xmm2
vmovdqa %xmm6,96(%esp)
movl %edi,%ebp
xorl %ebx,%esi
vmovdqa %xmm5,%xmm6
vpaddd %xmm1,%xmm5,%xmm5
shldl $5,%edi,%edi
addl %esi,%edx
vpxor %xmm4,%xmm2,%xmm2
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
addl 36(%esp),%ecx
vpsrld $30,%xmm2,%xmm4
vmovdqa %xmm5,16(%esp)
andl %eax,%ebp
xorl %ebx,%eax
shrdl $7,%edi,%edi
movl %edx,%esi
vpslld $2,%xmm2,%xmm2
xorl %eax,%ebp
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
addl 40(%esp),%ebx
andl %edi,%esi
vpor %xmm4,%xmm2,%xmm2
xorl %eax,%edi
shrdl $7,%edx,%edx
vmovdqa 64(%esp),%xmm4
movl %ecx,%ebp
xorl %edi,%esi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 44(%esp),%eax
andl %edx,%ebp
xorl %edi,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
addl %ebx,%eax
vpalignr $8,%xmm1,%xmm2,%xmm5
vpxor %xmm7,%xmm3,%xmm3
addl 48(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
vpxor %xmm4,%xmm3,%xmm3
vmovdqa %xmm7,64(%esp)
addl %esi,%edi
xorl %ecx,%ebp
vmovdqa %xmm6,%xmm7
vpaddd %xmm2,%xmm6,%xmm6
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpxor %xmm5,%xmm3,%xmm3
addl 52(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
vpsrld $30,%xmm3,%xmm5
vmovdqa %xmm6,32(%esp)
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpslld $2,%xmm3,%xmm3
addl 56(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vpor %xmm5,%xmm3,%xmm3
addl 60(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl (%esp),%eax
vpaddd %xmm3,%xmm7,%xmm7
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
vmovdqa %xmm7,48(%esp)
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 4(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 8(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
addl 12(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
movl 196(%esp),%ebp
cmpl 200(%esp),%ebp
je L006done
vmovdqa 160(%esp),%xmm7
vmovdqa 176(%esp),%xmm6
vmovdqu (%ebp),%xmm0
vmovdqu 16(%ebp),%xmm1
vmovdqu 32(%ebp),%xmm2
vmovdqu 48(%ebp),%xmm3
addl $64,%ebp
vpshufb %xmm6,%xmm0,%xmm0
movl %ebp,196(%esp)
vmovdqa %xmm7,96(%esp)
addl 16(%esp),%ebx
xorl %edi,%esi
vpshufb %xmm6,%xmm1,%xmm1
movl %ecx,%ebp
shldl $5,%ecx,%ecx
vpaddd %xmm7,%xmm0,%xmm4
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
vmovdqa %xmm4,(%esp)
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
vpshufb %xmm6,%xmm2,%xmm2
movl %edx,%ebp
shldl $5,%edx,%edx
vpaddd %xmm7,%xmm1,%xmm5
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vmovdqa %xmm5,16(%esp)
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
vpshufb %xmm6,%xmm3,%xmm3
movl %edi,%ebp
shldl $5,%edi,%edi
vpaddd %xmm7,%xmm2,%xmm6
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
vmovdqa %xmm6,32(%esp)
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,%ebx
movl %ecx,8(%ebp)
xorl %edx,%ebx
movl %edx,12(%ebp)
movl %edi,16(%ebp)
movl %esi,%ebp
andl %ebx,%esi
movl %ebp,%ebx
jmp L005loop
.align 4,0x90
L006done:
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vzeroall
movl 192(%esp),%ebp
addl (%ebp),%eax
movl 204(%esp),%esp
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
movl %edi,16(%ebp)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 6,0x90
LK_XX_XX:
.long 1518500249,1518500249,1518500249,1518500249
.long 1859775393,1859775393,1859775393,1859775393
.long 2400959708,2400959708,2400959708,2400959708
.long 3395469782,3395469782,3395469782,3395469782
.long 66051,67438087,134810123,202182159
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115
.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82
.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112
.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
// ---- .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/vpaes-x86.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
.align 6,0x90
L_vpaes_consts:
.long 218628480,235210255,168496130,67568393
.long 252381056,17041926,33884169,51187212
.long 252645135,252645135,252645135,252645135
.long 1512730624,3266504856,1377990664,3401244816
.long 830229760,1275146365,2969422977,3447763452
.long 3411033600,2979783055,338359620,2782886510
.long 4209124096,907596821,221174255,1006095553
.long 191964160,3799684038,3164090317,1589111125
.long 182528256,1777043520,2877432650,3265356744
.long 1874708224,3503451415,3305285752,363511674
.long 1606117888,3487855781,1093350906,2384367825
.long 197121,67569157,134941193,202313229
.long 67569157,134941193,202313229,197121
.long 134941193,202313229,197121,67569157
.long 202313229,197121,67569157,134941193
.long 33619971,100992007,168364043,235736079
.long 235736079,33619971,100992007,168364043
.long 168364043,235736079,33619971,100992007
.long 100992007,168364043,235736079,33619971
.long 50462976,117835012,185207048,252579084
.long 252314880,51251460,117574920,184942860
.long 184682752,252054788,50987272,118359308
.long 118099200,185467140,251790600,50727180
.long 2946363062,528716217,1300004225,1881839624
.long 1532713819,1532713819,1532713819,1532713819
.long 3602276352,4288629033,3737020424,4153884961
.long 1354558464,32357713,2958822624,3775749553
.long 1201988352,132424512,1572796698,503232858
.long 2213177600,1597421020,4103937655,675398315
.long 2749646592,4273543773,1511898873,121693092
.long 3040248576,1103263732,2871565598,1608280554
.long 2236667136,2588920351,482954393,64377734
.long 3069987328,291237287,2117370568,3650299247
.long 533321216,3573750986,2572112006,1401264716
.long 1339849704,2721158661,548607111,3445553514
.long 2128193280,3054596040,2183486460,1257083700
.long 655635200,1165381986,3923443150,2344132524
.long 190078720,256924420,290342170,357187870
.long 1610966272,2263057382,4103205268,309794674
.long 2592527872,2233205587,1335446729,3402964816
.long 3973531904,3225098121,3002836325,1918774430
.long 3870401024,2102906079,2284471353,4117666579
.long 617007872,1021508343,366931923,691083277
.long 2528395776,3491914898,2968704004,1613121270
.long 3445188352,3247741094,844474987,4093578302
.long 651481088,1190302358,1689581232,574775300
.long 4289380608,206939853,2555985458,2489840491
.long 2130264064,327674451,3566485037,3349835193
.long 2470714624,316102159,3636825756,3393945945
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
.byte 118,101,114,115,105,116,121,41,0
.align 6,0x90
.private_extern __vpaes_preheat
.align 4
__vpaes_preheat:
addl (%esp),%ebp
movdqa -48(%ebp),%xmm7
movdqa -16(%ebp),%xmm6
ret
.private_extern __vpaes_encrypt_core
.align 4
__vpaes_encrypt_core:
movl $16,%ecx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa (%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
movdqu (%edx),%xmm5
.byte 102,15,56,0,208
movdqa 16(%ebp),%xmm0
pxor %xmm5,%xmm2
psrld $4,%xmm1
addl $16,%edx
.byte 102,15,56,0,193
leal 192(%ebp),%ebx
pxor %xmm2,%xmm0
jmp L000enc_entry
.align 4,0x90
L001enc_loop:
movdqa 32(%ebp),%xmm4
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa 64(%ebp),%xmm5
pxor %xmm4,%xmm0
movdqa -64(%ebx,%ecx,1),%xmm1
.byte 102,15,56,0,234
movdqa 80(%ebp),%xmm2
movdqa (%ebx,%ecx,1),%xmm4
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addl $16,%edx
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addl $16,%ecx
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andl $48,%ecx
subl $1,%eax
pxor %xmm3,%xmm0
L000enc_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm6,%xmm0
.byte 102,15,56,0,232
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm7,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm5
pxor %xmm1,%xmm3
jnz L001enc_loop
movdqa 96(%ebp),%xmm4
movdqa 112(%ebp),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%ebx,%ecx,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.private_extern __vpaes_decrypt_core
.align 4
__vpaes_decrypt_core:
leal 608(%ebp),%ebx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa -64(%ebx),%xmm2
pandn %xmm0,%xmm1
movl %eax,%ecx
psrld $4,%xmm1
movdqu (%edx),%xmm5
shll $4,%ecx
pand %xmm6,%xmm0
.byte 102,15,56,0,208
movdqa -48(%ebx),%xmm0
xorl $48,%ecx
.byte 102,15,56,0,193
andl $48,%ecx
pxor %xmm5,%xmm2
movdqa 176(%ebp),%xmm5
pxor %xmm2,%xmm0
addl $16,%edx
leal -352(%ebx,%ecx,1),%ecx
jmp L002dec_entry
.align 4,0x90
L003dec_loop:
movdqa -32(%ebx),%xmm4
movdqa -16(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa (%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 16(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 32(%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 48(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 64(%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 80(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
addl $16,%edx
.byte 102,15,58,15,237,12
pxor %xmm1,%xmm0
subl $1,%eax
L002dec_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
psrld $4,%xmm1
.byte 102,15,56,0,208
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm2,%xmm3
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm7,%xmm2
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm0
pxor %xmm1,%xmm3
jnz L003dec_loop
movdqa 96(%ebx),%xmm4
.byte 102,15,56,0,226
pxor %xmm0,%xmm4
movdqa 112(%ebx),%xmm0
movdqa (%ecx),%xmm2
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
.byte 102,15,56,0,194
ret
.private_extern __vpaes_schedule_core
.align 4
__vpaes_schedule_core:
addl (%esp),%ebp
movdqu (%esi),%xmm0
movdqa 320(%ebp),%xmm2
movdqa %xmm0,%xmm3
leal (%ebp),%ebx
movdqa %xmm2,4(%esp)
call __vpaes_schedule_transform
movdqa %xmm0,%xmm7
testl %edi,%edi
jnz L004schedule_am_decrypting
movdqu %xmm0,(%edx)
jmp L005schedule_go
L004schedule_am_decrypting:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
movdqu %xmm3,(%edx)
xorl $48,%ecx
L005schedule_go:
cmpl $192,%eax
ja L006schedule_256
je L007schedule_192
L008schedule_128:
movl $10,%eax
L009loop_schedule_128:
call __vpaes_schedule_round
decl %eax
jz L010schedule_mangle_last
call __vpaes_schedule_mangle
jmp L009loop_schedule_128
.align 4,0x90
L007schedule_192:
movdqu 8(%esi),%xmm0
call __vpaes_schedule_transform
movdqa %xmm0,%xmm6
pxor %xmm4,%xmm4
movhlps %xmm4,%xmm6
movl $4,%eax
L011loop_schedule_192:
call __vpaes_schedule_round
.byte 102,15,58,15,198,8
call __vpaes_schedule_mangle
call __vpaes_schedule_192_smear
call __vpaes_schedule_mangle
call __vpaes_schedule_round
decl %eax
jz L010schedule_mangle_last
call __vpaes_schedule_mangle
call __vpaes_schedule_192_smear
jmp L011loop_schedule_192
.align 4,0x90
L006schedule_256:
movdqu 16(%esi),%xmm0
call __vpaes_schedule_transform
movl $7,%eax
L012loop_schedule_256:
call __vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call __vpaes_schedule_round
decl %eax
jz L010schedule_mangle_last
call __vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0
movdqa %xmm7,20(%esp)
movdqa %xmm6,%xmm7
call L_vpaes_schedule_low_round
movdqa 20(%esp),%xmm7
jmp L012loop_schedule_256
.align 4,0x90
L010schedule_mangle_last:
leal 384(%ebp),%ebx
testl %edi,%edi
jnz L013schedule_mangle_last_dec
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,193
leal 352(%ebp),%ebx
addl $32,%edx
L013schedule_mangle_last_dec:
addl $-16,%edx
pxor 336(%ebp),%xmm0
call __vpaes_schedule_transform
movdqu %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.private_extern __vpaes_schedule_192_smear
.align 4
__vpaes_schedule_192_smear:
pshufd $128,%xmm6,%xmm1
pshufd $254,%xmm7,%xmm0
pxor %xmm1,%xmm6
pxor %xmm1,%xmm1
pxor %xmm0,%xmm6
movdqa %xmm6,%xmm0
movhlps %xmm1,%xmm6
ret
.private_extern __vpaes_schedule_round
.align 4
__vpaes_schedule_round:
movdqa 8(%esp),%xmm2
pxor %xmm1,%xmm1
.byte 102,15,58,15,202,15
.byte 102,15,58,15,210,15
pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0
.byte 102,15,58,15,192,1
movdqa %xmm2,8(%esp)
L_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor 336(%ebp),%xmm7
movdqa -16(%ebp),%xmm4
movdqa -48(%ebp),%xmm5
movdqa %xmm4,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm4,%xmm0
movdqa -32(%ebp),%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm5,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm5,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm5,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa 32(%ebp),%xmm4
.byte 102,15,56,0,226
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.private_extern __vpaes_schedule_transform
.align 4
__vpaes_schedule_transform:
movdqa -16(%ebp),%xmm2
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
movdqa (%ebx),%xmm2
.byte 102,15,56,0,208
movdqa 16(%ebx),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.private_extern __vpaes_schedule_mangle
.align 4
__vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa 128(%ebp),%xmm5
testl %edi,%edi
jnz L014schedule_mangle_dec
addl $16,%edx
pxor 336(%ebp),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
jmp L015schedule_mangle_both
.align 4,0x90
L014schedule_mangle_dec:
movdqa -16(%ebp),%xmm2
leal 416(%ebp),%esi
movdqa %xmm2,%xmm1
pandn %xmm4,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm4
movdqa (%esi),%xmm2
.byte 102,15,56,0,212
movdqa 16(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 32(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 48(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 64(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 80(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 96(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 112(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
addl $-16,%edx
L015schedule_mangle_both:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
addl $-16,%ecx
andl $48,%ecx
movdqu %xmm3,(%edx)
ret
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.align 4
_vpaes_set_encrypt_key:
L_vpaes_set_encrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call L016pic
L016pic:
popl %ebx
leal _BORINGSSL_function_hit+5-L016pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
movl $48,%ecx
movl $0,%edi
leal L_vpaes_consts+0x30-L017pic_point,%ebp
call __vpaes_schedule_core
L017pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _vpaes_set_decrypt_key
.private_extern _vpaes_set_decrypt_key
.align 4
_vpaes_set_decrypt_key:
L_vpaes_set_decrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
shll $4,%ebx
leal 16(%edx,%ebx,1),%edx
movl $1,%edi
movl %eax,%ecx
shrl $1,%ecx
andl $32,%ecx
xorl $32,%ecx
leal L_vpaes_consts+0x30-L018pic_point,%ebp
call __vpaes_schedule_core
L018pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _vpaes_encrypt
.private_extern _vpaes_encrypt
.align 4
_vpaes_encrypt:
L_vpaes_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call L019pic
L019pic:
popl %ebx
leal _BORINGSSL_function_hit+4-L019pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
leal L_vpaes_consts+0x30-L020pic_point,%ebp
call __vpaes_preheat
L020pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call __vpaes_encrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _vpaes_decrypt
.private_extern _vpaes_decrypt
.align 4
_vpaes_decrypt:
L_vpaes_decrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
leal L_vpaes_consts+0x30-L021pic_point,%ebp
call __vpaes_preheat
L021pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call __vpaes_decrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _vpaes_cbc_encrypt
.private_extern _vpaes_cbc_encrypt
.align 4
_vpaes_cbc_encrypt:
L_vpaes_cbc_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
subl $16,%eax
jc L022cbc_abort
leal -56(%esp),%ebx
movl 36(%esp),%ebp
andl $-16,%ebx
movl 40(%esp),%ecx
xchgl %esp,%ebx
movdqu (%ebp),%xmm1
subl %esi,%edi
movl %ebx,48(%esp)
movl %edi,(%esp)
movl %edx,4(%esp)
movl %ebp,8(%esp)
movl %eax,%edi
leal L_vpaes_consts+0x30-L023pic_point,%ebp
call __vpaes_preheat
L023pic_point:
cmpl $0,%ecx
je L024cbc_dec_loop
jmp L025cbc_enc_loop
.align 4,0x90
L025cbc_enc_loop:
movdqu (%esi),%xmm0
pxor %xmm1,%xmm0
call __vpaes_encrypt_core
movl (%esp),%ebx
movl 4(%esp),%edx
movdqa %xmm0,%xmm1
movdqu %xmm0,(%ebx,%esi,1)
leal 16(%esi),%esi
subl $16,%edi
jnc L025cbc_enc_loop
jmp L026cbc_done
.align 4,0x90
L024cbc_dec_loop:
movdqu (%esi),%xmm0
movdqa %xmm1,16(%esp)
movdqa %xmm0,32(%esp)
call __vpaes_decrypt_core
movl (%esp),%ebx
movl 4(%esp),%edx
pxor 16(%esp),%xmm0
movdqa 32(%esp),%xmm1
movdqu %xmm0,(%ebx,%esi,1)
leal 16(%esi),%esi
subl $16,%edi
jnc L024cbc_dec_loop
L026cbc_done:
movl 8(%esp),%ebx
movl 48(%esp),%esp
movdqu %xmm1,(%ebx)
L022cbc_abort:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
// ---- .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/co-586.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
.globl _bn_mul_comba8
.private_extern _bn_mul_comba8
.align 4
_bn_mul_comba8:
L_bn_mul_comba8_begin:
pushl %esi
movl 12(%esp),%esi
pushl %edi
movl 20(%esp),%edi
pushl %ebp
pushl %ebx
xorl %ebx,%ebx
movl (%esi),%eax
xorl %ecx,%ecx
movl (%edi),%edx
# ################## Calculate word 0
xorl %ebp,%ebp
# mul a[0]*b[0]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,(%eax)
movl 4(%esi),%eax
# saved r[0]
# ################## Calculate word 1
xorl %ebx,%ebx
# mul a[1]*b[0]
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
# mul a[0]*b[1]
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,4(%eax)
movl 8(%esi),%eax
# saved r[1]
# ################## Calculate word 2
xorl %ecx,%ecx
# mul a[2]*b[0]
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
# mul a[1]*b[1]
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
# mul a[0]*b[2]
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,8(%eax)
movl 12(%esi),%eax
# saved r[2]
# ################## Calculate word 3
xorl %ebp,%ebp
# mul a[3]*b[0]
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
# mul a[2]*b[1]
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
# mul a[1]*b[2]
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
# mul a[0]*b[3]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,12(%eax)
movl 16(%esi),%eax
# saved r[3]
# ################## Calculate word 4
xorl %ebx,%ebx
# mul a[4]*b[0]
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
# mul a[3]*b[1]
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
# mul a[2]*b[2]
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
# mul a[1]*b[3]
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
# mul a[0]*b[4]
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,16(%eax)
movl 20(%esi),%eax
# saved r[4]
# ################## Calculate word 5
xorl %ecx,%ecx
# mul a[5]*b[0]
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
# mul a[4]*b[1]
mull %edx
addl %eax,%ebp
movl 12(%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
# mul a[3]*b[2]
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
# mul a[2]*b[3]
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 16(%edi),%edx
adcl $0,%ecx
# mul a[1]*b[4]
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
# mul a[0]*b[5]
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,20(%eax)
movl 24(%esi),%eax
# saved r[5]
# ################## Calculate word 6
xorl %ebp,%ebp
# mul a[6]*b[0]
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
# mul a[5]*b[1]
mull %edx
addl %eax,%ebx
movl 16(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
# mul a[4]*b[2]
mull %edx
addl %eax,%ebx
movl 12(%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
# mul a[3]*b[3]
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 16(%edi),%edx
adcl $0,%ebp
# mul a[2]*b[4]
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 20(%edi),%edx
adcl $0,%ebp
# mul a[1]*b[5]
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
# mul a[0]*b[6]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,24(%eax)
movl 28(%esi),%eax
# saved r[6]
# ################## Calculate word 7
xorl %ebx,%ebx
# mul a[7]*b[0]
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
# mul a[6]*b[1]
mull %edx
addl %eax,%ecx
movl 20(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
# mul a[5]*b[2]
mull %edx
addl %eax,%ecx
movl 16(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
# mul a[4]*b[3]
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
# mul a[3]*b[4]
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 20(%edi),%edx
adcl $0,%ebx
# mul a[2]*b[5]
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 24(%edi),%edx
adcl $0,%ebx
# mul a[1]*b[6]
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
# mul a[0]*b[7]
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
movl %ecx,28(%eax)
movl 28(%esi),%eax
# saved r[7]
# ################## Calculate word 8
xorl %ecx,%ecx
# mul a[7]*b[1]
mull %edx
addl %eax,%ebp
movl 24(%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
# mul a[6]*b[2]
mull %edx
addl %eax,%ebp
movl 20(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
# mul a[5]*b[3]
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 16(%edi),%edx
adcl $0,%ecx
# mul a[4]*b[4]
mull %edx
addl %eax,%ebp
movl 12(%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
# mul a[3]*b[5]
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 24(%edi),%edx
adcl $0,%ecx
# mul a[2]*b[6]
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 28(%edi),%edx
adcl $0,%ecx
# mul a[1]*b[7]
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
movl %ebp,32(%eax)
movl 28(%esi),%eax
# saved r[8]
# ################## Calculate word 9
xorl %ebp,%ebp
# mul a[7]*b[2]
mull %edx
addl %eax,%ebx
movl 24(%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
# mul a[6]*b[3]
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 16(%edi),%edx
adcl $0,%ebp
# mul a[5]*b[4]
mull %edx
addl %eax,%ebx
movl 16(%esi),%eax
adcl %edx,%ecx
movl 20(%edi),%edx
adcl $0,%ebp
# mul a[4]*b[5]
mull %edx
addl %eax,%ebx
movl 12(%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
# mul a[3]*b[6]
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 28(%edi),%edx
adcl $0,%ebp
# mul a[2]*b[7]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
movl %ebx,36(%eax)
movl 28(%esi),%eax
# saved r[9]
# ################## Calculate word 10
xorl %ebx,%ebx
# mul a[7]*b[3]
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
# mul a[6]*b[4]
mull %edx
addl %eax,%ecx
movl 20(%esi),%eax
adcl %edx,%ebp
movl 20(%edi),%edx
adcl $0,%ebx
# mul a[5]*b[5]
mull %edx
addl %eax,%ecx
movl 16(%esi),%eax
adcl %edx,%ebp
movl 24(%edi),%edx
adcl $0,%ebx
# mul a[4]*b[6]
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
# mul a[3]*b[7]
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
movl %ecx,40(%eax)
movl 28(%esi),%eax
# saved r[10]
# ################## Calculate word 11
xorl %ecx,%ecx
# mul a[7]*b[4]
mull %edx
addl %eax,%ebp
movl 24(%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
# mul a[6]*b[5]
mull %edx
addl %eax,%ebp
movl 20(%esi),%eax
adcl %edx,%ebx
movl 24(%edi),%edx
adcl $0,%ecx
# mul a[5]*b[6]
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 28(%edi),%edx
adcl $0,%ecx
# mul a[4]*b[7]
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
movl %ebp,44(%eax)
movl 28(%esi),%eax
# saved r[11]
# ################## Calculate word 12
xorl %ebp,%ebp
# mul a[7]*b[5]
mull %edx
addl %eax,%ebx
movl 24(%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
# mul a[6]*b[6]
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 28(%edi),%edx
adcl $0,%ebp
# mul a[5]*b[7]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
movl %ebx,48(%eax)
movl 28(%esi),%eax
# saved r[12]
# ################## Calculate word 13
xorl %ebx,%ebx
# mul a[7]*b[6]
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
# mul a[6]*b[7]
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
movl %ecx,52(%eax)
movl 28(%esi),%eax
# saved r[13]
# ################## Calculate word 14
xorl %ecx,%ecx
# mul a[7]*b[7]
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
adcl $0,%ecx
movl %ebp,56(%eax)
# saved r[14]
# save r[15]
movl %ebx,60(%eax)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.globl _bn_mul_comba4
.private_extern _bn_mul_comba4
.align 4
_bn_mul_comba4:
L_bn_mul_comba4_begin:
pushl %esi
movl 12(%esp),%esi
pushl %edi
movl 20(%esp),%edi
pushl %ebp
pushl %ebx
xorl %ebx,%ebx
movl (%esi),%eax
xorl %ecx,%ecx
movl (%edi),%edx
# ################## Calculate word 0
xorl %ebp,%ebp
# mul a[0]*b[0]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,(%eax)
movl 4(%esi),%eax
# saved r[0]
# ################## Calculate word 1
xorl %ebx,%ebx
# mul a[1]*b[0]
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
# mul a[0]*b[1]
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,4(%eax)
movl 8(%esi),%eax
# saved r[1]
# ################## Calculate word 2
xorl %ecx,%ecx
# mul a[2]*b[0]
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
# mul a[1]*b[1]
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
# mul a[0]*b[2]
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,8(%eax)
movl 12(%esi),%eax
# saved r[2]
# ################## Calculate word 3
xorl %ebp,%ebp
# mul a[3]*b[0]
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
# mul a[2]*b[1]
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
# mul a[1]*b[2]
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
# mul a[0]*b[3]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
movl %ebx,12(%eax)
movl 12(%esi),%eax
# saved r[3]
# ################## Calculate word 4
xorl %ebx,%ebx
# mul a[3]*b[1]
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
# mul a[2]*b[2]
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
# mul a[1]*b[3]
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
movl %ecx,16(%eax)
movl 12(%esi),%eax
# saved r[4]
# ################## Calculate word 5
xorl %ecx,%ecx
# mul a[3]*b[2]
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
# mul a[2]*b[3]
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
movl %ebp,20(%eax)
movl 12(%esi),%eax
# saved r[5]
# ################## Calculate word 6
xorl %ebp,%ebp
# mul a[3]*b[3]
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
adcl $0,%ebp
movl %ebx,24(%eax)
# saved r[6]
# save r[7]
movl %ecx,28(%eax)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.globl _bn_sqr_comba8
.private_extern _bn_sqr_comba8
.align 4
_bn_sqr_comba8:
L_bn_sqr_comba8_begin:
pushl %esi
pushl %edi
pushl %ebp
pushl %ebx
movl 20(%esp),%edi
movl 24(%esp),%esi
xorl %ebx,%ebx
xorl %ecx,%ecx
movl (%esi),%eax
# ############### Calculate word 0
xorl %ebp,%ebp
# sqr a[0]*a[0]
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,(%edi)
movl 4(%esi),%eax
# saved r[0]
# ############### Calculate word 1
xorl %ebx,%ebx
# sqr a[1]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
movl %ecx,4(%edi)
movl (%esi),%edx
# saved r[1]
# ############### Calculate word 2
xorl %ecx,%ecx
# sqr a[2]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 4(%esi),%eax
adcl $0,%ecx
# sqr a[1]*a[1]
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl (%esi),%edx
adcl $0,%ecx
movl %ebp,8(%edi)
movl 12(%esi),%eax
# saved r[2]
# ############### Calculate word 3
xorl %ebp,%ebp
# sqr a[3]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 8(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
# sqr a[2]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 16(%esi),%eax
adcl $0,%ebp
movl %ebx,12(%edi)
movl (%esi),%edx
# saved r[3]
# ############### Calculate word 4
xorl %ebx,%ebx
# sqr a[4]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 12(%esi),%eax
adcl $0,%ebx
movl 4(%esi),%edx
# sqr a[3]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
# sqr a[2]*a[2]
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl (%esi),%edx
adcl $0,%ebx
movl %ecx,16(%edi)
movl 20(%esi),%eax
# saved r[4]
# ############### Calculate word 5
xorl %ecx,%ecx
# sqr a[5]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 16(%esi),%eax
adcl $0,%ecx
movl 4(%esi),%edx
# sqr a[4]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 12(%esi),%eax
adcl $0,%ecx
movl 8(%esi),%edx
# sqr a[3]*a[2]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl %ebp,20(%edi)
movl (%esi),%edx
# saved r[5]
# ############### Calculate word 6
xorl %ebp,%ebp
# sqr a[6]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 20(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
# sqr a[5]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 16(%esi),%eax
adcl $0,%ebp
movl 8(%esi),%edx
# sqr a[4]*a[2]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 12(%esi),%eax
adcl $0,%ebp
# sqr a[3]*a[3]
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,24(%edi)
movl 28(%esi),%eax
# saved r[6]
# ############### Calculate word 7
xorl %ebx,%ebx
# sqr a[7]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 24(%esi),%eax
adcl $0,%ebx
movl 4(%esi),%edx
# sqr a[6]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 20(%esi),%eax
adcl $0,%ebx
movl 8(%esi),%edx
# sqr a[5]*a[2]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 16(%esi),%eax
adcl $0,%ebx
movl 12(%esi),%edx
# sqr a[4]*a[3]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 28(%esi),%eax
adcl $0,%ebx
movl %ecx,28(%edi)
movl 4(%esi),%edx
# saved r[7]
# ############### Calculate word 8
xorl %ecx,%ecx
# sqr a[7]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl 8(%esi),%edx
# sqr a[6]*a[2]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 20(%esi),%eax
adcl $0,%ecx
movl 12(%esi),%edx
# sqr a[5]*a[3]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 16(%esi),%eax
adcl $0,%ecx
# sqr a[4]*a[4]
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl 8(%esi),%edx
adcl $0,%ecx
movl %ebp,32(%edi)
movl 28(%esi),%eax
# saved r[8]
# ############### Calculate word 9
xorl %ebp,%ebp
# sqr a[7]*a[2]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%eax
adcl $0,%ebp
movl 12(%esi),%edx
# sqr a[6]*a[3]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 20(%esi),%eax
adcl $0,%ebp
movl 16(%esi),%edx
# sqr a[5]*a[4]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 28(%esi),%eax
adcl $0,%ebp
movl %ebx,36(%edi)
movl 12(%esi),%edx
# saved r[9]
# ############### Calculate word 10
xorl %ebx,%ebx
# sqr a[7]*a[3]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 24(%esi),%eax
adcl $0,%ebx
movl 16(%esi),%edx
# sqr a[6]*a[4]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 20(%esi),%eax
adcl $0,%ebx
# sqr a[5]*a[5]
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl 16(%esi),%edx
adcl $0,%ebx
movl %ecx,40(%edi)
movl 28(%esi),%eax
# saved r[10]
# ############### Calculate word 11
xorl %ecx,%ecx
# sqr a[7]*a[4]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl 20(%esi),%edx
# sqr a[6]*a[5]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 28(%esi),%eax
adcl $0,%ecx
movl %ebp,44(%edi)
movl 20(%esi),%edx
# saved r[11]
# ############### Calculate word 12
xorl %ebp,%ebp
# sqr a[7]*a[5]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%eax
adcl $0,%ebp
# sqr a[6]*a[6]
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%edx
adcl $0,%ebp
movl %ebx,48(%edi)
movl 28(%esi),%eax
# saved r[12]
# ############### Calculate word 13
xorl %ebx,%ebx
# sqr a[7]*a[6]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 28(%esi),%eax
adcl $0,%ebx
movl %ecx,52(%edi)
# saved r[13]
# ############### Calculate word 14
xorl %ecx,%ecx
# sqr a[7]*a[7]
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
adcl $0,%ecx
movl %ebp,56(%edi)
# saved r[14]
movl %ebx,60(%edi)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
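# _bn_sqr_comba4 is the 4-word (128-bit) variant of the same column-wise
# squaring: %edi = r, %esi = a, eight result words r[0..7], with the cross
# products doubled and the squares a[i]*a[i] added once per column.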
.globl _bn_sqr_comba4
.private_extern _bn_sqr_comba4
.align 4
_bn_sqr_comba4:
L_bn_sqr_comba4_begin:
pushl %esi
pushl %edi
pushl %ebp
pushl %ebx
movl 20(%esp),%edi
movl 24(%esp),%esi
xorl %ebx,%ebx
xorl %ecx,%ecx
movl (%esi),%eax
# ############### Calculate word 0
xorl %ebp,%ebp
# sqr a[0]*a[0]
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,(%edi)
movl 4(%esi),%eax
# saved r[0]
# ############### Calculate word 1
xorl %ebx,%ebx
# sqr a[1]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
movl %ecx,4(%edi)
movl (%esi),%edx
# saved r[1]
# ############### Calculate word 2
xorl %ecx,%ecx
# sqr a[2]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 4(%esi),%eax
adcl $0,%ecx
# sqr a[1]*a[1]
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl (%esi),%edx
adcl $0,%ecx
movl %ebp,8(%edi)
movl 12(%esi),%eax
# saved r[2]
# ############### Calculate word 3
xorl %ebp,%ebp
# sqr a[3]*a[0]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 8(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
# sqr a[2]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 12(%esi),%eax
adcl $0,%ebp
movl %ebx,12(%edi)
movl 4(%esi),%edx
# saved r[3]
# ############### Calculate word 4
xorl %ebx,%ebx
# sqr a[3]*a[1]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
# sqr a[2]*a[2]
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%edx
adcl $0,%ebx
movl %ecx,16(%edi)
movl 12(%esi),%eax
# saved r[4]
# ############### Calculate word 5
xorl %ecx,%ecx
# sqr a[3]*a[2]
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 12(%esi),%eax
adcl $0,%ecx
movl %ebp,20(%edi)
# saved r[5]
# ############### Calculate word 6
xorl %ebp,%ebp
# sqr a[3]*a[3]
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
adcl $0,%ebp
movl %ebx,24(%edi)
# saved r[6]
movl %ecx,28(%edi)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
wlsfx/bnbb | 12,051 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/md5-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
.globl _md5_block_asm_data_order
.private_extern _md5_block_asm_data_order
.align 4
_md5_block_asm_data_order:
L_md5_block_asm_data_order_begin:
pushl %esi
pushl %edi
movl 12(%esp),%edi
movl 16(%esp),%esi
movl 20(%esp),%ecx
pushl %ebp
shll $6,%ecx
pushl %ebx
addl %esi,%ecx
subl $64,%ecx
movl (%edi),%eax
pushl %ecx
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
L000start:
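# Each iteration of L000start digests one 64-byte block.  The MD5 state
# a,b,c,d lives in %eax,%ebx,%ecx,%edx, %esi points at the message block,
# %ebp carries the current message word and %edi is scratch for the round
# boolean function.  The value pushed above (%esp) is the address of the
# last block, used for the loop test at the bottom.  Round 1 (the "R0"
# steps below) uses F(b,c,d) = (b & c) | (~b & d), computed here as
# ((c ^ d) & b) ^ d, message words X[0..15] in order, rotations 7,12,17,22
# and the usual additive constants floor(2^32 * |sin(i)|).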
# R0 section
movl %ecx,%edi
movl (%esi),%ebp
# R0 0
xorl %edx,%edi
andl %ebx,%edi
leal 3614090360(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 4(%esi),%ebp
addl %ebx,%eax
# R0 1
xorl %ecx,%edi
andl %eax,%edi
leal 3905402710(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 8(%esi),%ebp
addl %eax,%edx
# R0 2
xorl %ebx,%edi
andl %edx,%edi
leal 606105819(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 12(%esi),%ebp
addl %edx,%ecx
# R0 3
xorl %eax,%edi
andl %ecx,%edi
leal 3250441966(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 16(%esi),%ebp
addl %ecx,%ebx
# R0 4
xorl %edx,%edi
andl %ebx,%edi
leal 4118548399(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 20(%esi),%ebp
addl %ebx,%eax
# R0 5
xorl %ecx,%edi
andl %eax,%edi
leal 1200080426(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 24(%esi),%ebp
addl %eax,%edx
# R0 6
xorl %ebx,%edi
andl %edx,%edi
leal 2821735955(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 28(%esi),%ebp
addl %edx,%ecx
# R0 7
xorl %eax,%edi
andl %ecx,%edi
leal 4249261313(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 32(%esi),%ebp
addl %ecx,%ebx
# R0 8
xorl %edx,%edi
andl %ebx,%edi
leal 1770035416(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 36(%esi),%ebp
addl %ebx,%eax
# R0 9
xorl %ecx,%edi
andl %eax,%edi
leal 2336552879(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 40(%esi),%ebp
addl %eax,%edx
# R0 10
xorl %ebx,%edi
andl %edx,%edi
leal 4294925233(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 44(%esi),%ebp
addl %edx,%ecx
# R0 11
xorl %eax,%edi
andl %ecx,%edi
leal 2304563134(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 48(%esi),%ebp
addl %ecx,%ebx
# R0 12
xorl %edx,%edi
andl %ebx,%edi
leal 1804603682(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 52(%esi),%ebp
addl %ebx,%eax
# R0 13
xorl %ecx,%edi
andl %eax,%edi
leal 4254626195(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 56(%esi),%ebp
addl %eax,%edx
# R0 14
xorl %ebx,%edi
andl %edx,%edi
leal 2792965006(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 60(%esi),%ebp
addl %edx,%ecx
# R0 15
xorl %eax,%edi
andl %ecx,%edi
leal 1236535329(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 4(%esi),%ebp
addl %ecx,%ebx
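# Round 2 ("R1") switches to G(b,c,d) = (b & d) | (c & ~d), computed as
# ((b ^ c) & d) ^ c, takes message words in the order X[(1 + 5k) mod 16]
# and rotates by 5,9,14,20.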
# R1 section
# R1 16
leal 4129170786(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 24(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
# R1 17
leal 3225465664(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 44(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
# R1 18
leal 643717713(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl (%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
# R1 19
leal 3921069994(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 20(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
# R1 20
leal 3593408605(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 40(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
# R1 21
leal 38016083(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 60(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
# R1 22
leal 3634488961(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 16(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
# R1 23
leal 3889429448(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 36(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
# R1 24
leal 568446438(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 56(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
# R1 25
leal 3275163606(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 12(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
# R1 26
leal 4107603335(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 32(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
# R1 27
leal 1163531501(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 52(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
# R1 28
leal 2850285829(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 8(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
# R1 29
leal 4243563512(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 28(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
# R1 30
leal 1735328473(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 48(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
# R1 31
leal 2368359562(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 20(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
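# Round 3 ("R2") uses H(b,c,d) = b ^ c ^ d, message order X[(5 + 3k) mod 16]
# and rotations 4,11,16,23.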
# R2 section
# R2 32
xorl %edx,%edi
xorl %ebx,%edi
leal 4294588738(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 32(%esi),%ebp
movl %ebx,%edi
# R2 33
leal 2272392833(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 44(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
# R2 34
xorl %ebx,%edi
xorl %edx,%edi
leal 1839030562(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 56(%esi),%ebp
movl %edx,%edi
# R2 35
leal 4259657740(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 4(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
# R2 36
xorl %edx,%edi
xorl %ebx,%edi
leal 2763975236(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 16(%esi),%ebp
movl %ebx,%edi
# R2 37
leal 1272893353(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 28(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
# R2 38
xorl %ebx,%edi
xorl %edx,%edi
leal 4139469664(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 40(%esi),%ebp
movl %edx,%edi
# R2 39
leal 3200236656(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 52(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
# R2 40
xorl %edx,%edi
xorl %ebx,%edi
leal 681279174(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl (%esi),%ebp
movl %ebx,%edi
# R2 41
leal 3936430074(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 12(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
# R2 42
xorl %ebx,%edi
xorl %edx,%edi
leal 3572445317(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 24(%esi),%ebp
movl %edx,%edi
# R2 43
leal 76029189(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 36(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
# R2 44
xorl %edx,%edi
xorl %ebx,%edi
leal 3654602809(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 48(%esi),%ebp
movl %ebx,%edi
# R2 45
leal 3873151461(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 60(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
# R2 46
xorl %ebx,%edi
xorl %edx,%edi
leal 530742520(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 8(%esi),%ebp
movl %edx,%edi
# R2 47
leal 3299628645(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl (%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $23,%ebx
addl %ecx,%ebx
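# Round 4 ("R3") uses I(b,c,d) = c ^ (b | ~d); %edi is preloaded with -1 so
# that the following xorl produces the complement.  Message order is
# X[(7k) mod 16] and the rotations are 6,10,15,21.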
# R3 section
# R3 48
xorl %edx,%edi
orl %ebx,%edi
leal 4096336452(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 28(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
# R3 49
orl %eax,%edi
leal 1126891415(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 56(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
# R3 50
orl %edx,%edi
leal 2878612391(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 20(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
# R3 51
orl %ecx,%edi
leal 4237533241(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 48(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
# R3 52
orl %ebx,%edi
leal 1700485571(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 12(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
# R3 53
orl %eax,%edi
leal 2399980690(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 40(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
# R3 54
orl %edx,%edi
leal 4293915773(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 4(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
# R3 55
orl %ecx,%edi
leal 2240044497(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 32(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
# R3 56
orl %ebx,%edi
leal 1873313359(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 60(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
# R3 57
orl %eax,%edi
leal 4264355552(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 24(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
# R3 58
orl %edx,%edi
leal 2734768916(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 52(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
# R3 59
orl %ecx,%edi
leal 1309151649(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 16(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
# R3 60
orl %ebx,%edi
leal 4149444226(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 44(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
# R3 61
orl %eax,%edi
leal 3174756917(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 8(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
# R3 62
orl %edx,%edi
leal 718787259(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 36(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
# R3 63
orl %ecx,%edi
leal 3951481745(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 24(%esp),%ebp
addl %edi,%ebx
addl $64,%esi
roll $21,%ebx
movl (%ebp),%edi
addl %ecx,%ebx
addl %edi,%eax
movl 4(%ebp),%edi
addl %edi,%ebx
movl 8(%ebp),%edi
addl %edi,%ecx
movl 12(%ebp),%edi
addl %edi,%edx
movl %eax,(%ebp)
movl %ebx,4(%ebp)
movl (%esp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
cmpl %esi,%edi
jae L000start
popl %eax
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
wlsfx/bnbb | 49,856 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/aesni-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
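# _aes_hw_encrypt / _aes_hw_decrypt: single-block AES with AES-NI.
# Arguments (cdecl): 4(%esp) = in, 8(%esp) = out, 12(%esp) = key schedule;
# 240(key) holds the round count.  The .byte sequences are AES-NI opcodes
# emitted literally: 102,15,56,220,209 is aesenc %xmm1,%xmm2, 221 is
# aesenclast, 222/223 are aesdec/aesdeclast.  The loop applies one round key
# per iteration, and the xmm registers are cleared before returning so no
# key material is left behind.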
.globl _aes_hw_encrypt
.private_extern _aes_hw_encrypt
.align 4
_aes_hw_encrypt:
L_aes_hw_encrypt_begin:
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call L000pic
L000pic:
popl %ebx
leal _BORINGSSL_function_hit+1-L000pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 12(%esp),%edx
movups (%eax),%xmm2
movl 240(%edx),%ecx
movl 8(%esp),%eax
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L001enc1_loop_1:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L001enc1_loop_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movups %xmm2,(%eax)
pxor %xmm2,%xmm2
ret
.globl _aes_hw_decrypt
.private_extern _aes_hw_decrypt
.align 4
_aes_hw_decrypt:
L_aes_hw_decrypt_begin:
movl 4(%esp),%eax
movl 12(%esp),%edx
movups (%eax),%xmm2
movl 240(%edx),%ecx
movl 8(%esp),%eax
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L002dec1_loop_2:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L002dec1_loop_2
.byte 102,15,56,223,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movups %xmm2,(%eax)
pxor %xmm2,%xmm2
ret
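# __aesni_encrypt{2,3,4,6} and __aesni_decrypt{2,3,4,6} are internal helpers
# that keep 2-6 blocks (in %xmm2..%xmm7) in flight at once so the aesenc /
# aesdec latency is hidden.  On entry %edx = key schedule and %ecx = rounds;
# %edx is advanced to the far end of the schedule and the round keys are then
# addressed with a negative, incrementing index in %ecx.  The .byte 15,31,64,0
# in the 4-block variants is a long nop used for padding.  The 6-block
# versions also expose L_aesni_encrypt6_enter / L_aesni_decrypt6_enter, used
# by the CTR and XTS code below, which performs the first round inline before
# calling in.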
.private_extern __aesni_encrypt2
.align 4
__aesni_encrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
L003enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz L003enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.private_extern __aesni_decrypt2
.align 4
__aesni_decrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
L004dec2_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
movups -16(%edx,%ecx,1),%xmm0
jnz L004dec2_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,223,208
.byte 102,15,56,223,216
ret
.private_extern __aesni_encrypt3
.align 4
__aesni_encrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
L005enc3_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%edx,%ecx,1),%xmm0
jnz L005enc3_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.private_extern __aesni_decrypt3
.align 4
__aesni_decrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
L006dec3_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
movups -16(%edx,%ecx,1),%xmm0
jnz L006dec3_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
ret
.private_extern __aesni_encrypt4
.align 4
__aesni_encrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 15,31,64,0
addl $16,%ecx
L007enc4_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%edx,%ecx,1),%xmm0
jnz L007enc4_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.private_extern __aesni_decrypt4
.align 4
__aesni_decrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 15,31,64,0
addl $16,%ecx
L008dec4_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
.byte 102,15,56,222,232
movups -16(%edx,%ecx,1),%xmm0
jnz L008dec4_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
.byte 102,15,56,223,232
ret
.private_extern __aesni_encrypt6
.align 4
__aesni_encrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp L009_aesni_encrypt6_inner
.align 4,0x90
L010enc6_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
L009_aesni_encrypt6_inner:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
L_aesni_encrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%edx,%ecx,1),%xmm0
jnz L010enc6_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.private_extern __aesni_decrypt6
.align 4
__aesni_decrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,222,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,222,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,222,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp L011_aesni_decrypt6_inner
.align 4,0x90
L012dec6_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
L011_aesni_decrypt6_inner:
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
L_aesni_decrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
movups -16(%edx,%ecx,1),%xmm0
jnz L012dec6_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
.byte 102,15,56,223,232
.byte 102,15,56,223,240
.byte 102,15,56,223,248
ret
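# _aes_hw_ecb_encrypt(in, out, length, key, enc): arguments at 20..36(%esp)
# after the four pushes.  The length is rounded down to a whole number of
# 16-byte blocks, the enc flag selects the encrypt or decrypt path, and the
# main loops process 96 bytes (six blocks) per iteration via __aesni_encrypt6
# / __aesni_decrypt6, with a 1-5 block tail handled separately.  All xmm
# registers are zeroed at L013ecb_ret.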
.globl _aes_hw_ecb_encrypt
.private_extern _aes_hw_ecb_encrypt
.align 4
_aes_hw_ecb_encrypt:
L_aes_hw_ecb_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
andl $-16,%eax
jz L013ecb_ret
movl 240(%edx),%ecx
testl %ebx,%ebx
jz L014ecb_decrypt
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
jb L015ecb_enc_tail
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
jmp L016ecb_enc_loop6_enter
.align 4,0x90
L017ecb_enc_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
movdqu 16(%esi),%xmm3
movups %xmm4,32(%edi)
movdqu 32(%esi),%xmm4
movups %xmm5,48(%edi)
movdqu 48(%esi),%xmm5
movups %xmm6,64(%edi)
movdqu 64(%esi),%xmm6
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
L016ecb_enc_loop6_enter:
call __aesni_encrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
jnc L017ecb_enc_loop6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
jz L013ecb_ret
L015ecb_enc_tail:
movups (%esi),%xmm2
cmpl $32,%eax
jb L018ecb_enc_one
movups 16(%esi),%xmm3
je L019ecb_enc_two
movups 32(%esi),%xmm4
cmpl $64,%eax
jb L020ecb_enc_three
movups 48(%esi),%xmm5
je L021ecb_enc_four
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call __aesni_encrypt6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp L013ecb_ret
.align 4,0x90
L018ecb_enc_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L022enc1_loop_3:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L022enc1_loop_3
.byte 102,15,56,221,209
movups %xmm2,(%edi)
jmp L013ecb_ret
.align 4,0x90
L019ecb_enc_two:
call __aesni_encrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp L013ecb_ret
.align 4,0x90
L020ecb_enc_three:
call __aesni_encrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp L013ecb_ret
.align 4,0x90
L021ecb_enc_four:
call __aesni_encrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
jmp L013ecb_ret
.align 4,0x90
L014ecb_decrypt:
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
jb L023ecb_dec_tail
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
jmp L024ecb_dec_loop6_enter
.align 4,0x90
L025ecb_dec_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
movdqu 16(%esi),%xmm3
movups %xmm4,32(%edi)
movdqu 32(%esi),%xmm4
movups %xmm5,48(%edi)
movdqu 48(%esi),%xmm5
movups %xmm6,64(%edi)
movdqu 64(%esi),%xmm6
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
L024ecb_dec_loop6_enter:
call __aesni_decrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
jnc L025ecb_dec_loop6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
jz L013ecb_ret
L023ecb_dec_tail:
movups (%esi),%xmm2
cmpl $32,%eax
jb L026ecb_dec_one
movups 16(%esi),%xmm3
je L027ecb_dec_two
movups 32(%esi),%xmm4
cmpl $64,%eax
jb L028ecb_dec_three
movups 48(%esi),%xmm5
je L029ecb_dec_four
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call __aesni_decrypt6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp L013ecb_ret
.align 4,0x90
L026ecb_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L030dec1_loop_4:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L030dec1_loop_4
.byte 102,15,56,223,209
movups %xmm2,(%edi)
jmp L013ecb_ret
.align 4,0x90
L027ecb_dec_two:
call __aesni_decrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp L013ecb_ret
.align 4,0x90
L028ecb_dec_three:
call __aesni_decrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp L013ecb_ret
.align 4,0x90
L029ecb_dec_four:
call __aesni_decrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
L013ecb_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
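# _aes_hw_ccm64_encrypt_blocks / _aes_hw_ccm64_decrypt_blocks do the per-block
# work of CCM: CTR-mode encryption combined with a CBC-MAC.  %xmm3 holds the
# running CBC-MAC state (from 40(%esp)) and %xmm7 a byte-reversed copy of the
# counter block (from 36(%esp)) so that paddq with the constant 1 stored at
# 16(%esp) increments the 64-bit counter; the 16-byte mask built at (%esp) is
# the pshufb byte-swap pattern (.byte 102,15,56,0,... is pshufb) used to put
# the counter back in big-endian order before each encryption.  The counter
# block and the MAC state are encrypted in parallel, one aesenc each per loop
# iteration.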
.globl _aes_hw_ccm64_encrypt_blocks
.private_extern _aes_hw_ccm64_encrypt_blocks
.align 4
_aes_hw_ccm64_encrypt_blocks:
L_aes_hw_ccm64_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl 40(%esp),%ecx
movl %esp,%ebp
subl $60,%esp
andl $-16,%esp
movl %ebp,48(%esp)
movdqu (%ebx),%xmm7
movdqu (%ecx),%xmm3
movl 240(%edx),%ecx
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $1,%ebx
xorl %ebp,%ebp
movl %ebx,16(%esp)
movl %ebp,20(%esp)
movl %ebp,24(%esp)
movl %ebp,28(%esp)
shll $4,%ecx
movl $16,%ebx
leal (%edx),%ebp
movdqa (%esp),%xmm5
movdqa %xmm7,%xmm2
leal 32(%edx,%ecx,1),%edx
subl %ecx,%ebx
.byte 102,15,56,0,253
L031ccm64_enc_outer:
movups (%ebp),%xmm0
movl %ebx,%ecx
movups (%esi),%xmm6
xorps %xmm0,%xmm2
movups 16(%ebp),%xmm1
xorps %xmm6,%xmm0
xorps %xmm0,%xmm3
movups 32(%ebp),%xmm0
L032ccm64_enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz L032ccm64_enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
paddq 16(%esp),%xmm7
decl %eax
.byte 102,15,56,221,208
.byte 102,15,56,221,216
leal 16(%esi),%esi
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
movups %xmm6,(%edi)
.byte 102,15,56,0,213
leal 16(%edi),%edi
jnz L031ccm64_enc_outer
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _aes_hw_ccm64_decrypt_blocks
.private_extern _aes_hw_ccm64_decrypt_blocks
.align 4
_aes_hw_ccm64_decrypt_blocks:
L_aes_hw_ccm64_decrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl 40(%esp),%ecx
movl %esp,%ebp
subl $60,%esp
andl $-16,%esp
movl %ebp,48(%esp)
movdqu (%ebx),%xmm7
movdqu (%ecx),%xmm3
movl 240(%edx),%ecx
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $1,%ebx
xorl %ebp,%ebp
movl %ebx,16(%esp)
movl %ebp,20(%esp)
movl %ebp,24(%esp)
movl %ebp,28(%esp)
movdqa (%esp),%xmm5
movdqa %xmm7,%xmm2
movl %edx,%ebp
movl %ecx,%ebx
.byte 102,15,56,0,253
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L033enc1_loop_5:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L033enc1_loop_5
.byte 102,15,56,221,209
shll $4,%ebx
movl $16,%ecx
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
leal 16(%esi),%esi
subl %ebx,%ecx
leal 32(%ebp,%ebx,1),%edx
movl %ecx,%ebx
jmp L034ccm64_dec_outer
.align 4,0x90
L034ccm64_dec_outer:
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
movups %xmm6,(%edi)
leal 16(%edi),%edi
.byte 102,15,56,0,213
subl $1,%eax
jz L035ccm64_dec_break
movups (%ebp),%xmm0
movl %ebx,%ecx
movups 16(%ebp),%xmm1
xorps %xmm0,%xmm6
xorps %xmm0,%xmm2
xorps %xmm6,%xmm3
movups 32(%ebp),%xmm0
L036ccm64_dec2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz L036ccm64_dec2_loop
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
leal 16(%esi),%esi
jmp L034ccm64_dec_outer
.align 4,0x90
L035ccm64_dec_break:
movl 240(%ebp),%ecx
movl %ebp,%edx
movups (%edx),%xmm0
movups 16(%edx),%xmm1
xorps %xmm0,%xmm6
leal 32(%edx),%edx
xorps %xmm6,%xmm3
L037enc1_loop_6:
.byte 102,15,56,220,217
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L037enc1_loop_6
.byte 102,15,56,221,217
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
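# _aes_hw_ctr32_encrypt_blocks(in, out, blocks, key, ivec): CTR mode with a
# 32-bit big-endian counter in the last word of the IV.  The counter is
# extracted with pextrd (.byte 102,15,58,22), byte-swapped, and six
# consecutive counter values are built up with pinsrd (.byte 102,15,58,34),
# expanded per block with pshufd and converted back through the byte-swap
# mask at (%esp); the IV words already XORed with round key 0 are cached at
# 32(%esp).  The main loop encrypts six counter blocks at a time through
# L_aesni_encrypt6_enter and XORs them with the input; tails of 1-5 blocks
# are handled after L041ctr32_tail.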
.globl _aes_hw_ctr32_encrypt_blocks
.private_extern _aes_hw_ctr32_encrypt_blocks
.align 4
_aes_hw_ctr32_encrypt_blocks:
L_aes_hw_ctr32_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call L038pic
L038pic:
popl %ebx
leal _BORINGSSL_function_hit+0-L038pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $88,%esp
andl $-16,%esp
movl %ebp,80(%esp)
cmpl $1,%eax
jb L039ctr32_ret
je L040ctr32_one_shortcut
movdqu (%ebx),%xmm7
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $6,%ecx
xorl %ebp,%ebp
movl %ecx,16(%esp)
movl %ecx,20(%esp)
movl %ecx,24(%esp)
movl %ebp,28(%esp)
.byte 102,15,58,22,251,3
.byte 102,15,58,34,253,3
movl 240(%edx),%ecx
bswap %ebx
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movdqa (%esp),%xmm2
.byte 102,15,58,34,195,0
leal 3(%ebx),%ebp
.byte 102,15,58,34,205,0
incl %ebx
.byte 102,15,58,34,195,1
incl %ebp
.byte 102,15,58,34,205,1
incl %ebx
.byte 102,15,58,34,195,2
incl %ebp
.byte 102,15,58,34,205,2
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
movdqu (%edx),%xmm6
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
pshufd $192,%xmm0,%xmm2
pshufd $128,%xmm0,%xmm3
cmpl $6,%eax
jb L041ctr32_tail
pxor %xmm6,%xmm7
shll $4,%ecx
movl $16,%ebx
movdqa %xmm7,32(%esp)
movl %edx,%ebp
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
subl $6,%eax
jmp L042ctr32_loop6
.align 4,0x90
L042ctr32_loop6:
pshufd $64,%xmm0,%xmm4
movdqa 32(%esp),%xmm0
pshufd $192,%xmm1,%xmm5
pxor %xmm0,%xmm2
pshufd $128,%xmm1,%xmm6
pxor %xmm0,%xmm3
pshufd $64,%xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
.byte 102,15,56,220,209
pxor %xmm0,%xmm6
pxor %xmm0,%xmm7
.byte 102,15,56,220,217
movups 32(%ebp),%xmm0
movl %ebx,%ecx
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call L_aesni_encrypt6_enter
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups %xmm2,(%edi)
movdqa 16(%esp),%xmm0
xorps %xmm1,%xmm4
movdqa 64(%esp),%xmm1
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
paddd %xmm0,%xmm1
paddd 48(%esp),%xmm0
movdqa (%esp),%xmm2
movups 48(%esi),%xmm3
movups 64(%esi),%xmm4
xorps %xmm3,%xmm5
movups 80(%esi),%xmm3
leal 96(%esi),%esi
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
xorps %xmm4,%xmm6
movups %xmm5,48(%edi)
xorps %xmm3,%xmm7
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
movups %xmm6,64(%edi)
pshufd $192,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
pshufd $128,%xmm0,%xmm3
subl $6,%eax
jnc L042ctr32_loop6
addl $6,%eax
jz L039ctr32_ret
movdqu (%ebp),%xmm7
movl %ebp,%edx
pxor 32(%esp),%xmm7
movl 240(%ebp),%ecx
L041ctr32_tail:
por %xmm7,%xmm2
cmpl $2,%eax
jb L043ctr32_one
pshufd $64,%xmm0,%xmm4
por %xmm7,%xmm3
je L044ctr32_two
pshufd $192,%xmm1,%xmm5
por %xmm7,%xmm4
cmpl $4,%eax
jb L045ctr32_three
pshufd $128,%xmm1,%xmm6
por %xmm7,%xmm5
je L046ctr32_four
por %xmm7,%xmm6
call __aesni_encrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups 48(%esi),%xmm0
xorps %xmm1,%xmm4
movups 64(%esi),%xmm1
xorps %xmm0,%xmm5
movups %xmm2,(%edi)
xorps %xmm1,%xmm6
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp L039ctr32_ret
.align 4,0x90
L040ctr32_one_shortcut:
movups (%ebx),%xmm2
movl 240(%edx),%ecx
L043ctr32_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L047enc1_loop_7:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L047enc1_loop_7
.byte 102,15,56,221,209
movups (%esi),%xmm6
xorps %xmm2,%xmm6
movups %xmm6,(%edi)
jmp L039ctr32_ret
.align 4,0x90
L044ctr32_two:
call __aesni_encrypt2
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp L039ctr32_ret
.align 4,0x90
L045ctr32_three:
call __aesni_encrypt3
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
movups 32(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm7,%xmm4
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp L039ctr32_ret
.align 4,0x90
L046ctr32_four:
call __aesni_encrypt4
movups (%esi),%xmm6
movups 16(%esi),%xmm7
movups 32(%esi),%xmm1
xorps %xmm6,%xmm2
movups 48(%esi),%xmm0
xorps %xmm7,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
L039ctr32_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movl 80(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
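# _aes_hw_xts_encrypt: the tweak (IV) from 40(%esp) is first encrypted under
# the second key at 36(%esp); the data key and length come from the usual
# 20..32(%esp) slots.  Each subsequent block tweak is the previous one
# multiplied by x in GF(2^128): pcmpgtd/pshufd turn the top bit into a mask,
# paddq doubles the 128-bit value, and pand with the reduction constant built
# at 96(%esp) (0x87 in the low word) folds the carries back in.  Six blocks
# are processed per iteration of L050xts_enc_loop6, and a trailing partial
# block is handled with ciphertext stealing at L059xts_enc_steal.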
.globl _aes_hw_xts_encrypt
.private_extern _aes_hw_xts_encrypt
.align 4
_aes_hw_xts_encrypt:
L_aes_hw_xts_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 36(%esp),%edx
movl 40(%esp),%esi
movl 240(%edx),%ecx
movups (%esi),%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L048enc1_loop_8:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L048enc1_loop_8
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl %esp,%ebp
subl $120,%esp
movl 240(%edx),%ecx
andl $-16,%esp
movl $135,96(%esp)
movl $0,100(%esp)
movl $1,104(%esp)
movl $0,108(%esp)
movl %eax,112(%esp)
movl %ebp,116(%esp)
movdqa %xmm2,%xmm1
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
pcmpgtd %xmm1,%xmm0
andl $-16,%eax
movl %edx,%ebp
movl %ecx,%ebx
subl $96,%eax
jc L049xts_enc_short
shll $4,%ecx
movl $16,%ebx
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
jmp L050xts_enc_loop6
.align 4,0x90
L050xts_enc_loop6:
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,16(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,32(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,64(%esp)
paddq %xmm1,%xmm1
movups (%ebp),%xmm0
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
pxor %xmm0,%xmm3
movdqu 48(%esi),%xmm5
pxor %xmm0,%xmm4
movdqu 64(%esi),%xmm6
pxor %xmm0,%xmm5
movdqu 80(%esi),%xmm1
pxor %xmm0,%xmm6
leal 96(%esi),%esi
pxor (%esp),%xmm2
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor 16(%esp),%xmm3
pxor 32(%esp),%xmm4
.byte 102,15,56,220,209
pxor 48(%esp),%xmm5
pxor 64(%esp),%xmm6
.byte 102,15,56,220,217
pxor %xmm0,%xmm7
movups 32(%ebp),%xmm0
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call L_aesni_encrypt6_enter
movdqa 80(%esp),%xmm1
pxor %xmm0,%xmm0
xorps (%esp),%xmm2
pcmpgtd %xmm1,%xmm0
xorps 16(%esp),%xmm3
movups %xmm2,(%edi)
xorps 32(%esp),%xmm4
movups %xmm3,16(%edi)
xorps 48(%esp),%xmm5
movups %xmm4,32(%edi)
xorps 64(%esp),%xmm6
movups %xmm5,48(%edi)
xorps %xmm1,%xmm7
movups %xmm6,64(%edi)
pshufd $19,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqa 96(%esp),%xmm3
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
subl $96,%eax
jnc L050xts_enc_loop6
movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
L049xts_enc_short:
addl $96,%eax
jz L051xts_enc_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
jb L052xts_enc_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
je L053xts_enc_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
jb L054xts_enc_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
je L055xts_enc_four
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm7
pxor %xmm1,%xmm7
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
pxor (%esp),%xmm2
movdqu 48(%esi),%xmm5
pxor 16(%esp),%xmm3
movdqu 64(%esi),%xmm6
pxor 32(%esp),%xmm4
leal 80(%esi),%esi
pxor 48(%esp),%xmm5
movdqa %xmm7,64(%esp)
pxor %xmm7,%xmm6
call __aesni_encrypt6
movaps 64(%esp),%xmm1
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps 32(%esp),%xmm4
movups %xmm2,(%edi)
xorps 48(%esp),%xmm5
movups %xmm3,16(%edi)
xorps %xmm1,%xmm6
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
jmp L056xts_enc_done
.align 4,0x90
L052xts_enc_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L057enc1_loop_9:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L057enc1_loop_9
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
jmp L056xts_enc_done
.align 4,0x90
L053xts_enc_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
call __aesni_encrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
jmp L056xts_enc_done
.align 4,0x90
L054xts_enc_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
leal 48(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
call __aesni_encrypt3
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
jmp L056xts_enc_done
.align 4,0x90
L055xts_enc_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
xorps (%esp),%xmm2
movups 48(%esi),%xmm5
leal 64(%esi),%esi
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
xorps %xmm6,%xmm5
call __aesni_encrypt4
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
xorps %xmm6,%xmm5
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
jmp L056xts_enc_done
.align 4,0x90
L051xts_enc_done6x:
movl 112(%esp),%eax
andl $15,%eax
jz L058xts_enc_ret
movdqa %xmm1,%xmm5
movl %eax,112(%esp)
jmp L059xts_enc_steal
.align 4,0x90
L056xts_enc_done:
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
jz L058xts_enc_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm5
paddq %xmm1,%xmm1
pand 96(%esp),%xmm5
pxor %xmm1,%xmm5
L059xts_enc_steal:
movzbl (%esi),%ecx
movzbl -16(%edi),%edx
leal 1(%esi),%esi
movb %cl,-16(%edi)
movb %dl,(%edi)
leal 1(%edi),%edi
subl $1,%eax
jnz L059xts_enc_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
movups -16(%edi),%xmm2
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L060enc1_loop_10:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L060enc1_loop_10
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,-16(%edi)
L058xts_enc_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
movdqa %xmm0,(%esp)
pxor %xmm3,%xmm3
movdqa %xmm0,16(%esp)
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
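# _aes_hw_xts_decrypt mirrors the encrypt path: the tweak is still encrypted
# (aesenc) under the second key, the per-block tweak update is the same
# GF(2^128) doubling, and the bulk work uses __aesni_decrypt6.  When the
# length is not a multiple of 16, the main pass is shortened by one block and
# the ciphertext-stealing tail is undone at L072xts_dec_only_one_more /
# L074xts_dec_steal using two saved tweaks.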
.globl _aes_hw_xts_decrypt
.private_extern _aes_hw_xts_decrypt
.align 4
_aes_hw_xts_decrypt:
L_aes_hw_xts_decrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 36(%esp),%edx
movl 40(%esp),%esi
movl 240(%edx),%ecx
movups (%esi),%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L061enc1_loop_11:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L061enc1_loop_11
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl %esp,%ebp
subl $120,%esp
andl $-16,%esp
xorl %ebx,%ebx
testl $15,%eax
setnz %bl
shll $4,%ebx
subl %ebx,%eax
movl $135,96(%esp)
movl $0,100(%esp)
movl $1,104(%esp)
movl $0,108(%esp)
movl %eax,112(%esp)
movl %ebp,116(%esp)
movl 240(%edx),%ecx
movl %edx,%ebp
movl %ecx,%ebx
movdqa %xmm2,%xmm1
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
pcmpgtd %xmm1,%xmm0
andl $-16,%eax
subl $96,%eax
jc L062xts_dec_short
shll $4,%ecx
movl $16,%ebx
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
jmp L063xts_dec_loop6
.align 4,0x90
L063xts_dec_loop6:
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,16(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,32(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,64(%esp)
paddq %xmm1,%xmm1
movups (%ebp),%xmm0
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
pxor %xmm0,%xmm3
movdqu 48(%esi),%xmm5
pxor %xmm0,%xmm4
movdqu 64(%esi),%xmm6
pxor %xmm0,%xmm5
movdqu 80(%esi),%xmm1
pxor %xmm0,%xmm6
leal 96(%esi),%esi
pxor (%esp),%xmm2
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor 16(%esp),%xmm3
pxor 32(%esp),%xmm4
.byte 102,15,56,222,209
pxor 48(%esp),%xmm5
pxor 64(%esp),%xmm6
.byte 102,15,56,222,217
pxor %xmm0,%xmm7
movups 32(%ebp),%xmm0
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
call L_aesni_decrypt6_enter
movdqa 80(%esp),%xmm1
pxor %xmm0,%xmm0
xorps (%esp),%xmm2
pcmpgtd %xmm1,%xmm0
xorps 16(%esp),%xmm3
movups %xmm2,(%edi)
xorps 32(%esp),%xmm4
movups %xmm3,16(%edi)
xorps 48(%esp),%xmm5
movups %xmm4,32(%edi)
xorps 64(%esp),%xmm6
movups %xmm5,48(%edi)
xorps %xmm1,%xmm7
movups %xmm6,64(%edi)
pshufd $19,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqa 96(%esp),%xmm3
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
subl $96,%eax
jnc L063xts_dec_loop6
movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
L062xts_dec_short:
addl $96,%eax
jz L064xts_dec_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
jb L065xts_dec_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
je L066xts_dec_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
jb L067xts_dec_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
je L068xts_dec_four
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm7
pxor %xmm1,%xmm7
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
pxor (%esp),%xmm2
movdqu 48(%esi),%xmm5
pxor 16(%esp),%xmm3
movdqu 64(%esi),%xmm6
pxor 32(%esp),%xmm4
leal 80(%esi),%esi
pxor 48(%esp),%xmm5
movdqa %xmm7,64(%esp)
pxor %xmm7,%xmm6
call __aesni_decrypt6
movaps 64(%esp),%xmm1
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps 32(%esp),%xmm4
movups %xmm2,(%edi)
xorps 48(%esp),%xmm5
movups %xmm3,16(%edi)
xorps %xmm1,%xmm6
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
jmp L069xts_dec_done
.align 4,0x90
L065xts_dec_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L070dec1_loop_12:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L070dec1_loop_12
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
jmp L069xts_dec_done
.align 4,0x90
L066xts_dec_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
call __aesni_decrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
jmp L069xts_dec_done
.align 4,0x90
L067xts_dec_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
leal 48(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
call __aesni_decrypt3
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
jmp L069xts_dec_done
.align 4,0x90
L068xts_dec_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
xorps (%esp),%xmm2
movups 48(%esi),%xmm5
leal 64(%esi),%esi
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
xorps %xmm6,%xmm5
call __aesni_decrypt4
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
xorps %xmm6,%xmm5
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
jmp L069xts_dec_done
.align 4,0x90
L064xts_dec_done6x:
movl 112(%esp),%eax
andl $15,%eax
jz L071xts_dec_ret
movl %eax,112(%esp)
jmp L072xts_dec_only_one_more
.align 4,0x90
L069xts_dec_done:
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
jz L071xts_dec_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
L072xts_dec_only_one_more:
pshufd $19,%xmm0,%xmm5
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm5
pxor %xmm1,%xmm5
movl %ebp,%edx
movl %ebx,%ecx
movups (%esi),%xmm2
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L073dec1_loop_13:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L073dec1_loop_13
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
L074xts_dec_steal:
movzbl 16(%esi),%ecx
movzbl (%edi),%edx
leal 1(%esi),%esi
movb %cl,(%edi)
movb %dl,16(%edi)
leal 1(%edi),%edi
subl $1,%eax
jnz L074xts_dec_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
movups (%edi),%xmm2
xorps %xmm6,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L075dec1_loop_14:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L075dec1_loop_14
.byte 102,15,56,223,209
xorps %xmm6,%xmm2
movups %xmm2,(%edi)
L071xts_dec_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
movdqa %xmm0,(%esp)
pxor %xmm3,%xmm3
movdqa %xmm0,16(%esp)
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
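# _aes_hw_cbc_encrypt(in, out, length, key, ivec, enc): CBC mode.  The
# encrypt path is necessarily serial (each block is XORed with the previous
# ciphertext before encryption), while the decrypt path runs six blocks in
# parallel through __aesni_decrypt6.  The raw .long values 2767451785 and
# 2868115081 in the encrypt tail are instruction bytes for "mov %esi,%esi;
# rep movsb" and "mov %esi,%esi; rep stosb", which copy the final partial
# block and zero-fill it to 16 bytes.  The final chaining value is written
# back through the ivec pointer at L081cbc_ret.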
.globl _aes_hw_cbc_encrypt
.private_extern _aes_hw_cbc_encrypt
.align 4
_aes_hw_cbc_encrypt:
L_aes_hw_cbc_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl %esp,%ebx
movl 24(%esp),%edi
subl $24,%ebx
movl 28(%esp),%eax
andl $-16,%ebx
movl 32(%esp),%edx
movl 36(%esp),%ebp
testl %eax,%eax
jz L076cbc_abort
cmpl $0,40(%esp)
xchgl %esp,%ebx
movups (%ebp),%xmm7
movl 240(%edx),%ecx
movl %edx,%ebp
movl %ebx,16(%esp)
movl %ecx,%ebx
je L077cbc_decrypt
movaps %xmm7,%xmm2
cmpl $16,%eax
jb L078cbc_enc_tail
subl $16,%eax
jmp L079cbc_enc_loop
.align 4,0x90
L079cbc_enc_loop:
movups (%esi),%xmm7
leal 16(%esi),%esi
movups (%edx),%xmm0
movups 16(%edx),%xmm1
xorps %xmm0,%xmm7
leal 32(%edx),%edx
xorps %xmm7,%xmm2
L080enc1_loop_15:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L080enc1_loop_15
.byte 102,15,56,221,209
movl %ebx,%ecx
movl %ebp,%edx
movups %xmm2,(%edi)
leal 16(%edi),%edi
subl $16,%eax
jnc L079cbc_enc_loop
addl $16,%eax
jnz L078cbc_enc_tail
movaps %xmm2,%xmm7
pxor %xmm2,%xmm2
jmp L081cbc_ret
L078cbc_enc_tail:
movl %eax,%ecx
.long 2767451785
movl $16,%ecx
subl %eax,%ecx
xorl %eax,%eax
.long 2868115081
leal -16(%edi),%edi
movl %ebx,%ecx
movl %edi,%esi
movl %ebp,%edx
jmp L079cbc_enc_loop
.align 4,0x90
L077cbc_decrypt:
cmpl $80,%eax
jbe L082cbc_dec_tail
movaps %xmm7,(%esp)
subl $80,%eax
jmp L083cbc_dec_loop6_enter
.align 4,0x90
L084cbc_dec_loop6:
movaps %xmm0,(%esp)
movups %xmm7,(%edi)
leal 16(%edi),%edi
L083cbc_dec_loop6_enter:
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
call __aesni_decrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps (%esp),%xmm2
xorps %xmm1,%xmm3
movups 32(%esi),%xmm1
xorps %xmm0,%xmm4
movups 48(%esi),%xmm0
xorps %xmm1,%xmm5
movups 64(%esi),%xmm1
xorps %xmm0,%xmm6
movups 80(%esi),%xmm0
xorps %xmm1,%xmm7
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 96(%esi),%esi
movups %xmm4,32(%edi)
movl %ebx,%ecx
movups %xmm5,48(%edi)
movl %ebp,%edx
movups %xmm6,64(%edi)
leal 80(%edi),%edi
subl $96,%eax
ja L084cbc_dec_loop6
movaps %xmm7,%xmm2
movaps %xmm0,%xmm7
addl $80,%eax
jle L085cbc_dec_clear_tail_collected
movups %xmm2,(%edi)
leal 16(%edi),%edi
L082cbc_dec_tail:
movups (%esi),%xmm2
movaps %xmm2,%xmm6
cmpl $16,%eax
jbe L086cbc_dec_one
movups 16(%esi),%xmm3
movaps %xmm3,%xmm5
cmpl $32,%eax
jbe L087cbc_dec_two
movups 32(%esi),%xmm4
cmpl $48,%eax
jbe L088cbc_dec_three
movups 48(%esi),%xmm5
cmpl $64,%eax
jbe L089cbc_dec_four
movups 64(%esi),%xmm6
movaps %xmm7,(%esp)
movups (%esi),%xmm2
xorps %xmm7,%xmm7
call __aesni_decrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps (%esp),%xmm2
xorps %xmm1,%xmm3
movups 32(%esi),%xmm1
xorps %xmm0,%xmm4
movups 48(%esi),%xmm0
xorps %xmm1,%xmm5
movups 64(%esi),%xmm7
xorps %xmm0,%xmm6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
movups %xmm4,32(%edi)
pxor %xmm4,%xmm4
movups %xmm5,48(%edi)
pxor %xmm5,%xmm5
leal 64(%edi),%edi
movaps %xmm6,%xmm2
pxor %xmm6,%xmm6
subl $80,%eax
jmp L090cbc_dec_tail_collected
.align 4,0x90
L086cbc_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
L091dec1_loop_16:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz L091dec1_loop_16
.byte 102,15,56,223,209
xorps %xmm7,%xmm2
movaps %xmm6,%xmm7
subl $16,%eax
jmp L090cbc_dec_tail_collected
.align 4,0x90
L087cbc_dec_two:
call __aesni_decrypt2
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movaps %xmm3,%xmm2
pxor %xmm3,%xmm3
leal 16(%edi),%edi
movaps %xmm5,%xmm7
subl $32,%eax
jmp L090cbc_dec_tail_collected
.align 4,0x90
L088cbc_dec_three:
call __aesni_decrypt3
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
xorps %xmm5,%xmm4
movups %xmm2,(%edi)
movaps %xmm4,%xmm2
pxor %xmm4,%xmm4
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
leal 32(%edi),%edi
movups 32(%esi),%xmm7
subl $48,%eax
jmp L090cbc_dec_tail_collected
.align 4,0x90
L089cbc_dec_four:
call __aesni_decrypt4
movups 16(%esi),%xmm1
movups 32(%esi),%xmm0
xorps %xmm7,%xmm2
movups 48(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
pxor %xmm4,%xmm4
leal 48(%edi),%edi
movaps %xmm5,%xmm2
pxor %xmm5,%xmm5
subl $64,%eax
jmp L090cbc_dec_tail_collected
.align 4,0x90
L085cbc_dec_clear_tail_collected:
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
L090cbc_dec_tail_collected:
andl $15,%eax
jnz L092cbc_dec_tail_partial
movups %xmm2,(%edi)
pxor %xmm0,%xmm0
jmp L081cbc_ret
.align 4,0x90
L092cbc_dec_tail_partial:
movaps %xmm2,(%esp)
pxor %xmm0,%xmm0
movl $16,%ecx
movl %esp,%esi
subl %eax,%ecx
.long 2767451785
movdqa %xmm2,(%esp)
L081cbc_ret:
movl 16(%esp),%esp
movl 36(%esp),%ebp
pxor %xmm2,%xmm2
pxor %xmm1,%xmm1
movups %xmm7,(%ebp)
pxor %xmm7,%xmm7
L076cbc_abort:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
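// Internal key-expansion helper: %eax = user key, %ecx = key length in bits
// (128/192/256), %edx = output AES_KEY. Returns 0 on success, -1 for a NULL
// pointer, -2 for an unsupported key length. The *_alt paths below expand the
// schedule with pshufb/aesenclast instead of aeskeygenassist, selected via
// OPENSSL_ia32cap_P.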
.private_extern __aesni_set_encrypt_key
.align 4
__aesni_set_encrypt_key:
pushl %ebp
pushl %ebx
testl %eax,%eax
jz L093bad_pointer
testl %edx,%edx
jz L093bad_pointer
call L094pic
L094pic:
popl %ebx
leal Lkey_const-L094pic(%ebx),%ebx
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-Lkey_const(%ebx),%ebp
movups (%eax),%xmm0
xorps %xmm4,%xmm4
movl 4(%ebp),%ebp
leal 16(%edx),%edx
andl $268437504,%ebp
cmpl $256,%ecx
je L09514rounds
cmpl $192,%ecx
je L09612rounds
cmpl $128,%ecx
jne L097bad_keybits
.align 4,0x90
L09810rounds:
cmpl $268435456,%ebp
je L09910rounds_alt
movl $9,%ecx
movups %xmm0,-16(%edx)
.byte 102,15,58,223,200,1
call L100key_128_cold
.byte 102,15,58,223,200,2
call L101key_128
.byte 102,15,58,223,200,4
call L101key_128
.byte 102,15,58,223,200,8
call L101key_128
.byte 102,15,58,223,200,16
call L101key_128
.byte 102,15,58,223,200,32
call L101key_128
.byte 102,15,58,223,200,64
call L101key_128
.byte 102,15,58,223,200,128
call L101key_128
.byte 102,15,58,223,200,27
call L101key_128
.byte 102,15,58,223,200,54
call L101key_128
movups %xmm0,(%edx)
movl %ecx,80(%edx)
jmp L102good_key
.align 4,0x90
L101key_128:
movups %xmm0,(%edx)
leal 16(%edx),%edx
L100key_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 4,0x90
L09910rounds_alt:
movdqa (%ebx),%xmm5
movl $8,%ecx
movdqa 32(%ebx),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,-16(%edx)
L103loop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leal 16(%edx),%edx
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%edx)
movdqa %xmm0,%xmm2
decl %ecx
jnz L103loop_key128
movdqa 48(%ebx),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%edx)
movl $9,%ecx
movl %ecx,96(%edx)
jmp L102good_key
.align 4,0x90
L09612rounds:
movq 16(%eax),%xmm2
cmpl $268435456,%ebp
je L10412rounds_alt
movl $11,%ecx
movups %xmm0,-16(%edx)
.byte 102,15,58,223,202,1
call L105key_192a_cold
.byte 102,15,58,223,202,2
call L106key_192b
.byte 102,15,58,223,202,4
call L107key_192a
.byte 102,15,58,223,202,8
call L106key_192b
.byte 102,15,58,223,202,16
call L107key_192a
.byte 102,15,58,223,202,32
call L106key_192b
.byte 102,15,58,223,202,64
call L107key_192a
.byte 102,15,58,223,202,128
call L106key_192b
movups %xmm0,(%edx)
movl %ecx,48(%edx)
jmp L102good_key
.align 4,0x90
L107key_192a:
movups %xmm0,(%edx)
leal 16(%edx),%edx
.align 4,0x90
L105key_192a_cold:
movaps %xmm2,%xmm5
L108key_192b_warm:
shufps $16,%xmm0,%xmm4
movdqa %xmm2,%xmm3
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
pslldq $4,%xmm3
xorps %xmm4,%xmm0
pshufd $85,%xmm1,%xmm1
pxor %xmm3,%xmm2
pxor %xmm1,%xmm0
pshufd $255,%xmm0,%xmm3
pxor %xmm3,%xmm2
ret
.align 4,0x90
L106key_192b:
movaps %xmm0,%xmm3
shufps $68,%xmm0,%xmm5
movups %xmm5,(%edx)
shufps $78,%xmm2,%xmm3
movups %xmm3,16(%edx)
leal 32(%edx),%edx
jmp L108key_192b_warm
.align 4,0x90
L10412rounds_alt:
movdqa 16(%ebx),%xmm5
movdqa 32(%ebx),%xmm4
movl $8,%ecx
movdqu %xmm0,-16(%edx)
L109loop_key192:
movq %xmm2,(%edx)
movdqa %xmm2,%xmm1
.byte 102,15,56,0,213
.byte 102,15,56,221,212
pslld $1,%xmm4
leal 24(%edx),%edx
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pshufd $255,%xmm0,%xmm3
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pxor %xmm2,%xmm0
pxor %xmm3,%xmm2
movdqu %xmm0,-16(%edx)
decl %ecx
jnz L109loop_key192
movl $11,%ecx
movl %ecx,32(%edx)
jmp L102good_key
.align 4,0x90
L09514rounds:
movups 16(%eax),%xmm2
leal 16(%edx),%edx
cmpl $268435456,%ebp
je L11014rounds_alt
movl $13,%ecx
movups %xmm0,-32(%edx)
movups %xmm2,-16(%edx)
.byte 102,15,58,223,202,1
call L111key_256a_cold
.byte 102,15,58,223,200,1
call L112key_256b
.byte 102,15,58,223,202,2
call L113key_256a
.byte 102,15,58,223,200,2
call L112key_256b
.byte 102,15,58,223,202,4
call L113key_256a
.byte 102,15,58,223,200,4
call L112key_256b
.byte 102,15,58,223,202,8
call L113key_256a
.byte 102,15,58,223,200,8
call L112key_256b
.byte 102,15,58,223,202,16
call L113key_256a
.byte 102,15,58,223,200,16
call L112key_256b
.byte 102,15,58,223,202,32
call L113key_256a
.byte 102,15,58,223,200,32
call L112key_256b
.byte 102,15,58,223,202,64
call L113key_256a
movups %xmm0,(%edx)
movl %ecx,16(%edx)
xorl %eax,%eax
jmp L102good_key
.align 4,0x90
L113key_256a:
movups %xmm2,(%edx)
leal 16(%edx),%edx
L111key_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 4,0x90
L112key_256b:
movups %xmm0,(%edx)
leal 16(%edx),%edx
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.align 4,0x90
L11014rounds_alt:
movdqa (%ebx),%xmm5
movdqa 32(%ebx),%xmm4
movl $7,%ecx
movdqu %xmm0,-32(%edx)
movdqa %xmm2,%xmm1
movdqu %xmm2,-16(%edx)
L114loop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
decl %ecx
jz L115done_key256
pshufd $255,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%edx)
leal 32(%edx),%edx
movdqa %xmm2,%xmm1
jmp L114loop_key256
L115done_key256:
movl $13,%ecx
movl %ecx,16(%edx)
L102good_key:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
popl %ebp
ret
.align 2,0x90
L093bad_pointer:
movl $-1,%eax
popl %ebx
popl %ebp
ret
.align 2,0x90
L097bad_keybits:
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
popl %ebp
ret
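// aes_hw_set_encrypt_key(user key, bits, AES_KEY *out): expands an AES
// encryption key schedule; returns 0 on success, -1 or -2 on error (see the
// helper above).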
.globl _aes_hw_set_encrypt_key
.private_extern _aes_hw_set_encrypt_key
.align 4
_aes_hw_set_encrypt_key:
L_aes_hw_set_encrypt_key_begin:
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call L116pic
L116pic:
popl %ebx
leal _BORINGSSL_function_hit+3-L116pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
call __aesni_set_encrypt_key
ret
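// aes_hw_set_decrypt_key: expands the encryption schedule, then reverses the
// round-key order and applies AESIMC to the inner round keys to obtain the
// decryption schedule. Same return codes as aes_hw_set_encrypt_key.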
.globl _aes_hw_set_decrypt_key
.private_extern _aes_hw_set_decrypt_key
.align 4
_aes_hw_set_decrypt_key:
L_aes_hw_set_decrypt_key_begin:
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
call __aesni_set_encrypt_key
movl 12(%esp),%edx
shll $4,%ecx
testl %eax,%eax
jnz L117dec_key_ret
leal 16(%edx,%ecx,1),%eax
movups (%edx),%xmm0
movups (%eax),%xmm1
movups %xmm0,(%eax)
movups %xmm1,(%edx)
leal 16(%edx),%edx
leal -16(%eax),%eax
L118dec_key_inverse:
movups (%edx),%xmm0
movups (%eax),%xmm1
.byte 102,15,56,219,192
.byte 102,15,56,219,201
leal 16(%edx),%edx
leal -16(%eax),%eax
movups %xmm0,16(%eax)
movups %xmm1,-16(%edx)
cmpl %edx,%eax
ja L118dec_key_inverse
movups (%edx),%xmm0
.byte 102,15,56,219,192
movups %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorl %eax,%eax
L117dec_key_ret:
ret
.align 6,0x90
Lkey_const:
.long 202313229,202313229,202313229,202313229
.long 67569157,67569157,67569157,67569157
.long 1,1,1,1
.long 27,27,27,27
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
.byte 115,108,46,111,114,103,62,0
.section __IMPORT,__pointers,non_lazy_symbol_pointers
L_OPENSSL_ia32cap_P$non_lazy_ptr:
.indirect_symbol _OPENSSL_ia32cap_P
.long 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
|
wlsfx/bnbb
| 5,508
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
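// gcm_gmult_ssse3(Xi, Htable): multiplies the 16-byte GHASH accumulator Xi by
// the hash key H in GF(2^128), using 4-bit pshufb table lookups into the
// precomputed Htable. Xi is read and written in place.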
.globl _gcm_gmult_ssse3
.private_extern _gcm_gmult_ssse3
.align 4
_gcm_gmult_ssse3:
L_gcm_gmult_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi
movl 24(%esp),%esi
movdqu (%edi),%xmm0
call L000pic_point
L000pic_point:
popl %eax
movdqa Lreverse_bytes-L000pic_point(%eax),%xmm7
movdqa Llow4_mask-L000pic_point(%eax),%xmm2
.byte 102,15,56,0,199
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
movl $5,%eax
L001loop_row_1:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz L001loop_row_1
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $5,%eax
L002loop_row_2:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz L002loop_row_2
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $6,%eax
L003loop_row_3:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz L003loop_row_3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,0,215
movdqu %xmm2,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
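// gcm_ghash_ssse3(Xi, Htable, inp, len): for each 16-byte block of inp,
// Xi = (Xi ^ block) * H. len is rounded down to a multiple of 16.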
.globl _gcm_ghash_ssse3
.private_extern _gcm_ghash_ssse3
.align 4
_gcm_ghash_ssse3:
L_gcm_ghash_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%edx
movl 32(%esp),%ecx
movdqu (%edi),%xmm0
call L004pic_point
L004pic_point:
popl %ebx
movdqa Lreverse_bytes-L004pic_point(%ebx),%xmm7
andl $-16,%ecx
.byte 102,15,56,0,199
pxor %xmm3,%xmm3
L005loop_ghash:
movdqa Llow4_mask-L004pic_point(%ebx),%xmm2
movdqu (%edx),%xmm1
.byte 102,15,56,0,207
pxor %xmm1,%xmm0
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
pxor %xmm2,%xmm2
movl $5,%eax
L006loop_row_4:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz L006loop_row_4
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $5,%eax
L007loop_row_5:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz L007loop_row_5
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $6,%eax
L008loop_row_6:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz L008loop_row_6
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movdqa %xmm2,%xmm0
leal -256(%esi),%esi
leal 16(%edx),%edx
subl $16,%ecx
jnz L005loop_ghash
.byte 102,15,56,0,199
movdqu %xmm0,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 4,0x90
Lreverse_bytes:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.align 4,0x90
Llow4_mask:
.long 252645135,252645135,252645135,252645135
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
|
wlsfx/bnbb
| 16,233
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/bn-586.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
.globl _bn_mul_add_words
.private_extern _bn_mul_add_words
.align 4
_bn_mul_add_words:
L_bn_mul_add_words_begin:
call L000PIC_me_up
L000PIC_me_up:
popl %eax
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L000PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc L001maw_non_sse2
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
movd 16(%esp),%mm0
pxor %mm1,%mm1
jmp L002maw_sse2_entry
.align 4,0x90
L003maw_sse2_unrolled:
movd (%eax),%mm3
paddq %mm3,%mm1
movd (%edx),%mm2
pmuludq %mm0,%mm2
movd 4(%edx),%mm4
pmuludq %mm0,%mm4
movd 8(%edx),%mm6
pmuludq %mm0,%mm6
movd 12(%edx),%mm7
pmuludq %mm0,%mm7
paddq %mm2,%mm1
movd 4(%eax),%mm3
paddq %mm4,%mm3
movd 8(%eax),%mm5
paddq %mm6,%mm5
movd 12(%eax),%mm4
paddq %mm4,%mm7
movd %mm1,(%eax)
movd 16(%edx),%mm2
pmuludq %mm0,%mm2
psrlq $32,%mm1
movd 20(%edx),%mm4
pmuludq %mm0,%mm4
paddq %mm3,%mm1
movd 24(%edx),%mm6
pmuludq %mm0,%mm6
movd %mm1,4(%eax)
psrlq $32,%mm1
movd 28(%edx),%mm3
addl $32,%edx
pmuludq %mm0,%mm3
paddq %mm5,%mm1
movd 16(%eax),%mm5
paddq %mm5,%mm2
movd %mm1,8(%eax)
psrlq $32,%mm1
paddq %mm7,%mm1
movd 20(%eax),%mm5
paddq %mm5,%mm4
movd %mm1,12(%eax)
psrlq $32,%mm1
paddq %mm2,%mm1
movd 24(%eax),%mm5
paddq %mm5,%mm6
movd %mm1,16(%eax)
psrlq $32,%mm1
paddq %mm4,%mm1
movd 28(%eax),%mm5
paddq %mm5,%mm3
movd %mm1,20(%eax)
psrlq $32,%mm1
paddq %mm6,%mm1
movd %mm1,24(%eax)
psrlq $32,%mm1
paddq %mm3,%mm1
movd %mm1,28(%eax)
leal 32(%eax),%eax
psrlq $32,%mm1
subl $8,%ecx
jz L004maw_sse2_exit
L002maw_sse2_entry:
testl $4294967288,%ecx
jnz L003maw_sse2_unrolled
.align 2,0x90
L005maw_sse2_loop:
movd (%edx),%mm2
movd (%eax),%mm3
pmuludq %mm0,%mm2
leal 4(%edx),%edx
paddq %mm3,%mm1
paddq %mm2,%mm1
movd %mm1,(%eax)
subl $1,%ecx
psrlq $32,%mm1
leal 4(%eax),%eax
jnz L005maw_sse2_loop
L004maw_sse2_exit:
movd %mm1,%eax
emms
ret
.align 4,0x90
L001maw_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %esi,%esi
movl 20(%esp),%edi
movl 28(%esp),%ecx
movl 24(%esp),%ebx
andl $4294967288,%ecx
movl 32(%esp),%ebp
pushl %ecx
jz L006maw_finish
.align 4,0x90
L007maw_loop:
# Round 0
movl (%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl (%edi),%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
# Round 4
movl 4(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 4(%edi),%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
# Round 8
movl 8(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 8(%edi),%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
# Round 12
movl 12(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 12(%edi),%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
# Round 16
movl 16(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 16(%edi),%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
# Round 20
movl 20(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 20(%edi),%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
# Round 24
movl 24(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 24(%edi),%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
# Round 28
movl 28(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 28(%edi),%eax
adcl $0,%edx
movl %eax,28(%edi)
movl %edx,%esi
subl $8,%ecx
leal 32(%ebx),%ebx
leal 32(%edi),%edi
jnz L007maw_loop
L006maw_finish:
movl 32(%esp),%ecx
andl $7,%ecx
jnz L008maw_finish2
jmp L009maw_end
L008maw_finish2:
# Tail Round 0
movl (%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl (%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,(%edi)
movl %edx,%esi
jz L009maw_end
# Tail Round 1
movl 4(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 4(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,4(%edi)
movl %edx,%esi
jz L009maw_end
# Tail Round 2
movl 8(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 8(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,8(%edi)
movl %edx,%esi
jz L009maw_end
# Tail Round 3
movl 12(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 12(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,12(%edi)
movl %edx,%esi
jz L009maw_end
# Tail Round 4
movl 16(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 16(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,16(%edi)
movl %edx,%esi
jz L009maw_end
# Tail Round 5
movl 20(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 20(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,20(%edi)
movl %edx,%esi
jz L009maw_end
# Tail Round 6
movl 24(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 24(%edi),%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
L009maw_end:
movl %esi,%eax
popl %ecx
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
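// bn_mul_words(rp, ap, num, w): rp[i] = low word of ap[i] * w plus the carry
// from the previous word; returns the final carry.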
.globl _bn_mul_words
.private_extern _bn_mul_words
.align 4
_bn_mul_words:
L_bn_mul_words_begin:
call L010PIC_me_up
L010PIC_me_up:
popl %eax
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L010PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc L011mw_non_sse2
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
movd 16(%esp),%mm0
pxor %mm1,%mm1
.align 4,0x90
L012mw_sse2_loop:
movd (%edx),%mm2
pmuludq %mm0,%mm2
leal 4(%edx),%edx
paddq %mm2,%mm1
movd %mm1,(%eax)
subl $1,%ecx
psrlq $32,%mm1
leal 4(%eax),%eax
jnz L012mw_sse2_loop
movd %mm1,%eax
emms
ret
.align 4,0x90
L011mw_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %esi,%esi
movl 20(%esp),%edi
movl 24(%esp),%ebx
movl 28(%esp),%ebp
movl 32(%esp),%ecx
andl $4294967288,%ebp
jz L013mw_finish
L014mw_loop:
# Round 0
movl (%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
# Round 4
movl 4(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
# Round 8
movl 8(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
# Round 12
movl 12(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
# Round 16
movl 16(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
# Round 20
movl 20(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
# Round 24
movl 24(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
# Round 28
movl 28(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,28(%edi)
movl %edx,%esi
addl $32,%ebx
addl $32,%edi
subl $8,%ebp
jz L013mw_finish
jmp L014mw_loop
L013mw_finish:
movl 28(%esp),%ebp
andl $7,%ebp
jnz L015mw_finish2
jmp L016mw_end
L015mw_finish2:
# Tail Round 0
movl (%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
decl %ebp
jz L016mw_end
# Tail Round 1
movl 4(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
decl %ebp
jz L016mw_end
# Tail Round 2
movl 8(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
decl %ebp
jz L016mw_end
# Tail Round 3
movl 12(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
decl %ebp
jz L016mw_end
# Tail Round 4
movl 16(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
decl %ebp
jz L016mw_end
# Tail Round 5
movl 20(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
decl %ebp
jz L016mw_end
# Tail Round 6
movl 24(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
L016mw_end:
movl %esi,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
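// bn_sqr_words(rp, ap, num): stores the double-width square of each input word,
// ap[i]^2, into rp[2*i] (low half) and rp[2*i+1] (high half).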
.globl _bn_sqr_words
.private_extern _bn_sqr_words
.align 4
_bn_sqr_words:
L_bn_sqr_words_begin:
call L017PIC_me_up
L017PIC_me_up:
popl %eax
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L017PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc L018sqr_non_sse2
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
.align 4,0x90
L019sqr_sse2_loop:
movd (%edx),%mm0
pmuludq %mm0,%mm0
leal 4(%edx),%edx
movq %mm0,(%eax)
subl $1,%ecx
leal 8(%eax),%eax
jnz L019sqr_sse2_loop
emms
ret
.align 4,0x90
L018sqr_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%ebx
andl $4294967288,%ebx
jz L020sw_finish
L021sw_loop:
# Round 0
movl (%edi),%eax
mull %eax
movl %eax,(%esi)
movl %edx,4(%esi)
# Round 4
movl 4(%edi),%eax
mull %eax
movl %eax,8(%esi)
movl %edx,12(%esi)
# Round 8
movl 8(%edi),%eax
mull %eax
movl %eax,16(%esi)
movl %edx,20(%esi)
# Round 12
movl 12(%edi),%eax
mull %eax
movl %eax,24(%esi)
movl %edx,28(%esi)
# Round 16
movl 16(%edi),%eax
mull %eax
movl %eax,32(%esi)
movl %edx,36(%esi)
# Round 20
movl 20(%edi),%eax
mull %eax
movl %eax,40(%esi)
movl %edx,44(%esi)
# Round 24
movl 24(%edi),%eax
mull %eax
movl %eax,48(%esi)
movl %edx,52(%esi)
# Round 28
movl 28(%edi),%eax
mull %eax
movl %eax,56(%esi)
movl %edx,60(%esi)
addl $32,%edi
addl $64,%esi
subl $8,%ebx
jnz L021sw_loop
L020sw_finish:
movl 28(%esp),%ebx
andl $7,%ebx
jz L022sw_end
# Tail Round 0
movl (%edi),%eax
mull %eax
movl %eax,(%esi)
decl %ebx
movl %edx,4(%esi)
jz L022sw_end
# Tail Round 1
movl 4(%edi),%eax
mull %eax
movl %eax,8(%esi)
decl %ebx
movl %edx,12(%esi)
jz L022sw_end
# Tail Round 2
movl 8(%edi),%eax
mull %eax
movl %eax,16(%esi)
decl %ebx
movl %edx,20(%esi)
jz L022sw_end
# Tail Round 3
movl 12(%edi),%eax
mull %eax
movl %eax,24(%esi)
decl %ebx
movl %edx,28(%esi)
jz L022sw_end
# Tail Round 4
movl 16(%edi),%eax
mull %eax
movl %eax,32(%esi)
decl %ebx
movl %edx,36(%esi)
jz L022sw_end
# Tail Round 5
movl 20(%edi),%eax
mull %eax
movl %eax,40(%esi)
decl %ebx
movl %edx,44(%esi)
jz L022sw_end
# Tail Round 6
movl 24(%edi),%eax
mull %eax
movl %eax,48(%esi)
movl %edx,52(%esi)
L022sw_end:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
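// bn_div_words(h, l, d): returns the 32-bit quotient of the double-word value
// (h:l) divided by d (plain DIVL, so the quotient must fit in 32 bits).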
.globl _bn_div_words
.private_extern _bn_div_words
.align 4
_bn_div_words:
L_bn_div_words_begin:
movl 4(%esp),%edx
movl 8(%esp),%eax
movl 12(%esp),%ecx
divl %ecx
ret
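// bn_add_words(rp, ap, bp, num): rp = ap + bp over num words; returns the carry
// out of the most significant word (0 or 1).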
.globl _bn_add_words
.private_extern _bn_add_words
.align 4
_bn_add_words:
L_bn_add_words_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%ebx
movl 24(%esp),%esi
movl 28(%esp),%edi
movl 32(%esp),%ebp
xorl %eax,%eax
andl $4294967288,%ebp
jz L023aw_finish
L024aw_loop:
# Round 0
movl (%esi),%ecx
movl (%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,(%ebx)
# Round 1
movl 4(%esi),%ecx
movl 4(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,4(%ebx)
# Round 2
movl 8(%esi),%ecx
movl 8(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,8(%ebx)
# Round 3
movl 12(%esi),%ecx
movl 12(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,12(%ebx)
# Round 4
movl 16(%esi),%ecx
movl 16(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,16(%ebx)
# Round 5
movl 20(%esi),%ecx
movl 20(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,20(%ebx)
# Round 6
movl 24(%esi),%ecx
movl 24(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
# Round 7
movl 28(%esi),%ecx
movl 28(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,28(%ebx)
addl $32,%esi
addl $32,%edi
addl $32,%ebx
subl $8,%ebp
jnz L024aw_loop
L023aw_finish:
movl 32(%esp),%ebp
andl $7,%ebp
jz L025aw_end
# Tail Round 0
movl (%esi),%ecx
movl (%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,(%ebx)
jz L025aw_end
# Tail Round 1
movl 4(%esi),%ecx
movl 4(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,4(%ebx)
jz L025aw_end
# Tail Round 2
movl 8(%esi),%ecx
movl 8(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,8(%ebx)
jz L025aw_end
# Tail Round 3
movl 12(%esi),%ecx
movl 12(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,12(%ebx)
jz L025aw_end
# Tail Round 4
movl 16(%esi),%ecx
movl 16(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,16(%ebx)
jz L025aw_end
# Tail Round 5
movl 20(%esi),%ecx
movl 20(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,20(%ebx)
jz L025aw_end
# Tail Round 6
movl 24(%esi),%ecx
movl 24(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
L025aw_end:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
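// bn_sub_words(rp, ap, bp, num): rp = ap - bp over num words; returns the final
// borrow (0 or 1).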
.globl _bn_sub_words
.private_extern _bn_sub_words
.align 4
_bn_sub_words:
L_bn_sub_words_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%ebx
movl 24(%esp),%esi
movl 28(%esp),%edi
movl 32(%esp),%ebp
xorl %eax,%eax
andl $4294967288,%ebp
jz L026aw_finish
L027aw_loop:
# Round 0
movl (%esi),%ecx
movl (%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,(%ebx)
# Round 1
movl 4(%esi),%ecx
movl 4(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,4(%ebx)
# Round 2
movl 8(%esi),%ecx
movl 8(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,8(%ebx)
# Round 3
movl 12(%esi),%ecx
movl 12(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,12(%ebx)
# Round 4
movl 16(%esi),%ecx
movl 16(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,16(%ebx)
# Round 5
movl 20(%esi),%ecx
movl 20(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,20(%ebx)
# Round 6
movl 24(%esi),%ecx
movl 24(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
# Round 7
movl 28(%esi),%ecx
movl 28(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,28(%ebx)
addl $32,%esi
addl $32,%edi
addl $32,%ebx
subl $8,%ebp
jnz L027aw_loop
L026aw_finish:
movl 32(%esp),%ebp
andl $7,%ebp
jz L028aw_end
# Tail Round 0
movl (%esi),%ecx
movl (%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,(%ebx)
jz L028aw_end
# Tail Round 1
movl 4(%esi),%ecx
movl 4(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,4(%ebx)
jz L028aw_end
# Tail Round 2
movl 8(%esi),%ecx
movl 8(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,8(%ebx)
jz L028aw_end
# Tail Round 3
movl 12(%esi),%ecx
movl 12(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,12(%ebx)
jz L028aw_end
# Tail Round 4
movl 16(%esi),%ecx
movl 16(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,16(%ebx)
jz L028aw_end
# Tail Round 5
movl 20(%esi),%ecx
movl 20(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,20(%ebx)
jz L028aw_end
# Tail Round 6
movl 24(%esi),%ecx
movl 24(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
L028aw_end:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.section __IMPORT,__pointers,non_lazy_symbol_pointers
L_OPENSSL_ia32cap_P$non_lazy_ptr:
.indirect_symbol _OPENSSL_ia32cap_P
.long 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
|
wlsfx/bnbb
| 9,078
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/x86-mont.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
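// bn_mul_mont(rp, ap, bp, np, n0, num): Montgomery product rp = ap*bp*R^-1 mod np
// with R = 2^(32*num); n0 points to the constant -np^-1 mod 2^32. Returns 0 when
// num < 4 (caller must fall back), otherwise 1. Uses an SSE2 path when available.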
.globl _bn_mul_mont
.private_extern _bn_mul_mont
.align 4
_bn_mul_mont:
L_bn_mul_mont_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
movl 40(%esp),%edi
cmpl $4,%edi
jl L000just_leave
leal 20(%esp),%esi
leal 24(%esp),%edx
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%ebp
negl %edi
movl %ebp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%ebp
xorl %ebp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%ebp
andl $-64,%ebp
movl %esp,%eax
subl %ebp,%eax
andl $-4096,%eax
movl %esp,%edx
leal (%ebp,%eax,1),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja L001page_walk
jmp L002page_walk_done
.align 4,0x90
L001page_walk:
leal -4096(%esp),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja L001page_walk
L002page_walk_done:
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%ebp
movl 16(%esi),%esi
movl (%esi),%esi
movl %eax,4(%esp)
movl %ebx,8(%esp)
movl %ecx,12(%esp)
movl %ebp,16(%esp)
movl %esi,20(%esp)
leal -3(%edi),%ebx
movl %edx,24(%esp)
call L003PIC_me_up
L003PIC_me_up:
popl %eax
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L003PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc L004non_sse2
movl $-1,%eax
movd %eax,%mm7
movl 8(%esp),%esi
movl 12(%esp),%edi
movl 16(%esp),%ebp
xorl %edx,%edx
xorl %ecx,%ecx
movd (%edi),%mm4
movd (%esi),%mm5
movd (%ebp),%mm3
pmuludq %mm4,%mm5
movq %mm5,%mm2
movq %mm5,%mm0
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
incl %ecx
.align 4,0x90
L0051st:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
leal 1(%ecx),%ecx
cmpl %ebx,%ecx
jl L0051st
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm2,%mm3
movq %mm3,32(%esp,%ebx,4)
incl %edx
L006outer:
xorl %ecx,%ecx
movd (%edi,%edx,4),%mm4
movd (%esi),%mm5
movd 32(%esp),%mm6
movd (%ebp),%mm3
pmuludq %mm4,%mm5
paddq %mm6,%mm5
movq %mm5,%mm0
movq %mm5,%mm2
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 36(%esp),%mm6
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm6,%mm2
incl %ecx
decl %ebx
L007inner:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
movd 36(%esp,%ecx,4),%mm6
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
paddq %mm6,%mm2
decl %ebx
leal 1(%ecx),%ecx
jnz L007inner
movl %ecx,%ebx
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
movd 36(%esp,%ebx,4),%mm6
paddq %mm2,%mm3
paddq %mm6,%mm3
movq %mm3,32(%esp,%ebx,4)
leal 1(%edx),%edx
cmpl %ebx,%edx
jle L006outer
emms
jmp L008common_tail
.align 4,0x90
L004non_sse2:
movl 8(%esp),%esi
leal 1(%ebx),%ebp
movl 12(%esp),%edi
xorl %ecx,%ecx
movl %esi,%edx
andl $1,%ebp
subl %edi,%edx
leal 4(%edi,%ebx,4),%eax
orl %edx,%ebp
movl (%edi),%edi
jz L009bn_sqr_mont
movl %eax,28(%esp)
movl (%esi),%eax
xorl %edx,%edx
.align 4,0x90
L010mull:
movl %edx,%ebp
mull %edi
addl %eax,%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
movl (%esi,%ecx,4),%eax
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl L010mull
movl %edx,%ebp
mull %edi
movl 20(%esp),%edi
addl %ebp,%eax
movl 16(%esp),%esi
adcl $0,%edx
imull 32(%esp),%edi
movl %eax,32(%esp,%ebx,4)
xorl %ecx,%ecx
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)
movl (%esi),%eax
mull %edi
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
incl %ecx
jmp L0112ndmadd
.align 4,0x90
L0121stmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl L0121stmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%eax
movl 20(%esp),%edi
adcl $0,%edx
movl 16(%esp),%esi
addl %eax,%ebp
adcl $0,%edx
imull 32(%esp),%edi
xorl %ecx,%ecx
addl 36(%esp,%ebx,4),%edx
movl %ebp,32(%esp,%ebx,4)
adcl $0,%ecx
movl (%esi),%eax
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)
mull %edi
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
movl $1,%ecx
.align 4,0x90
L0112ndmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)
jl L0112ndmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
xorl %eax,%eax
movl 12(%esp),%ecx
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
leal 4(%ecx),%ecx
movl %edx,32(%esp,%ebx,4)
cmpl 28(%esp),%ecx
movl %eax,36(%esp,%ebx,4)
je L008common_tail
movl (%ecx),%edi
movl 8(%esp),%esi
movl %ecx,12(%esp)
xorl %ecx,%ecx
xorl %edx,%edx
movl (%esi),%eax
jmp L0121stmadd
.align 4,0x90
L009bn_sqr_mont:
movl %ebx,(%esp)
movl %ecx,12(%esp)
movl %edi,%eax
mull %edi
movl %eax,32(%esp)
movl %edx,%ebx
shrl $1,%edx
andl $1,%ebx
incl %ecx
.align 4,0x90
L013sqr:
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal 1(%ecx),%ecx
adcl $0,%edx
leal (%ebx,%eax,2),%ebp
shrl $31,%eax
cmpl (%esp),%ecx
movl %eax,%ebx
movl %ebp,28(%esp,%ecx,4)
jl L013sqr
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
movl 20(%esp),%edi
adcl $0,%edx
movl 16(%esp),%esi
leal (%ebx,%eax,2),%ebp
imull 32(%esp),%edi
shrl $31,%eax
movl %ebp,32(%esp,%ecx,4)
leal (%eax,%edx,2),%ebp
movl (%esi),%eax
shrl $31,%edx
movl %ebp,36(%esp,%ecx,4)
movl %edx,40(%esp,%ecx,4)
mull %edi
addl 32(%esp),%eax
movl %ecx,%ebx
adcl $0,%edx
movl 4(%esi),%eax
movl $1,%ecx
.align 4,0x90
L0143rdmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
movl 4(%esi,%ecx,4),%eax
adcl $0,%edx
movl %ebp,28(%esp,%ecx,4)
movl %edx,%ebp
mull %edi
addl 36(%esp,%ecx,4),%ebp
leal 2(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)
jl L0143rdmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
movl 12(%esp),%ecx
xorl %eax,%eax
movl 8(%esp),%esi
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
movl %edx,32(%esp,%ebx,4)
cmpl %ebx,%ecx
movl %eax,36(%esp,%ebx,4)
je L008common_tail
movl 4(%esi,%ecx,4),%edi
leal 1(%ecx),%ecx
movl %edi,%eax
movl %ecx,12(%esp)
mull %edi
addl 32(%esp,%ecx,4),%eax
adcl $0,%edx
movl %eax,32(%esp,%ecx,4)
xorl %ebp,%ebp
cmpl %ebx,%ecx
leal 1(%ecx),%ecx
je L015sqrlast
movl %edx,%ebx
shrl $1,%edx
andl $1,%ebx
.align 4,0x90
L016sqradd:
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal (%eax,%eax,1),%ebp
adcl $0,%edx
shrl $31,%eax
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%eax
addl %ebx,%ebp
adcl $0,%eax
cmpl (%esp),%ecx
movl %ebp,28(%esp,%ecx,4)
movl %eax,%ebx
jle L016sqradd
movl %edx,%ebp
addl %edx,%edx
shrl $31,%ebp
addl %ebx,%edx
adcl $0,%ebp
L015sqrlast:
movl 20(%esp),%edi
movl 16(%esp),%esi
imull 32(%esp),%edi
addl 32(%esp,%ecx,4),%edx
movl (%esi),%eax
adcl $0,%ebp
movl %edx,32(%esp,%ecx,4)
movl %ebp,36(%esp,%ecx,4)
mull %edi
addl 32(%esp),%eax
leal -1(%ecx),%ebx
adcl $0,%edx
movl $1,%ecx
movl 4(%esi),%eax
jmp L0143rdmadd
.align 4,0x90
L008common_tail:
movl 16(%esp),%ebp
movl 4(%esp),%edi
leal 32(%esp),%esi
movl (%esi),%eax
movl %ebx,%ecx
xorl %edx,%edx
.align 4,0x90
L017sub:
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge L017sub
sbbl $0,%eax
movl $-1,%edx
xorl %eax,%edx
jmp L018copy
.align 4,0x90
L018copy:
movl 32(%esp,%ebx,4),%esi
movl (%edi,%ebx,4),%ebp
movl %ecx,32(%esp,%ebx,4)
andl %eax,%esi
andl %edx,%ebp
orl %esi,%ebp
movl %ebp,(%edi,%ebx,4)
decl %ebx
jge L018copy
movl 24(%esp),%esp
movl $1,%eax
L000just_leave:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
.section __IMPORT,__pointers,non_lazy_symbol_pointers
L_OPENSSL_ia32cap_P$non_lazy_ptr:
.indirect_symbol _OPENSSL_ia32cap_P
.long 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
|
wlsfx/bnbb
| 49,913
|
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/sha512-586.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
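// sha512_block_data_order(state, data, num): processes num 128-byte blocks of
// data, updating the eight 64-bit SHA-512 state words in place. Dispatches
// between plain x86, SSE2 and SSSE3 code paths based on OPENSSL_ia32cap_P.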
.globl _sha512_block_data_order
.private_extern _sha512_block_data_order
.align 4
_sha512_block_data_order:
L_sha512_block_data_order_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call L000pic_point
L000pic_point:
popl %ebp
leal L001K512-L000pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $7,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L001K512(%ebp),%edx
movl (%edx),%ecx
testl $67108864,%ecx
jz L002loop_x86
movl 4(%edx),%edx
movq (%esi),%mm0
andl $16777216,%ecx
movq 8(%esi),%mm1
andl $512,%edx
movq 16(%esi),%mm2
orl %edx,%ecx
movq 24(%esi),%mm3
movq 32(%esi),%mm4
movq 40(%esi),%mm5
movq 48(%esi),%mm6
movq 56(%esi),%mm7
cmpl $16777728,%ecx
je L003SSSE3
subl $80,%esp
jmp L004loop_sse2
.align 4,0x90
L004loop_sse2:
movq %mm1,8(%esp)
movq %mm2,16(%esp)
movq %mm3,24(%esp)
movq %mm5,40(%esp)
movq %mm6,48(%esp)
pxor %mm1,%mm2
movq %mm7,56(%esp)
movq %mm0,%mm3
movl (%edi),%eax
movl 4(%edi),%ebx
addl $8,%edi
movl $15,%edx
bswap %eax
bswap %ebx
jmp L00500_14_sse2
.align 4,0x90
L00500_14_sse2:
movd %eax,%mm1
movl (%edi),%eax
movd %ebx,%mm7
movl 4(%edi),%ebx
addl $8,%edi
bswap %eax
bswap %ebx
punpckldq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm3,%mm0
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm2,%mm3
movq %mm0,%mm2
addl $8,%ebp
paddq %mm6,%mm3
movq 48(%esp),%mm6
decl %edx
jnz L00500_14_sse2
movd %eax,%mm1
movd %ebx,%mm7
punpckldq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm3,%mm0
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm2,%mm3
movq %mm0,%mm2
addl $8,%ebp
paddq %mm6,%mm3
pxor %mm0,%mm0
movl $32,%edx
jmp L00616_79_sse2
.align 4,0x90
L00616_79_sse2:
movq 88(%esp),%mm5
movq %mm7,%mm1
psrlq $1,%mm7
movq %mm5,%mm6
psrlq $6,%mm5
psllq $56,%mm1
paddq %mm3,%mm0
movq %mm7,%mm3
psrlq $6,%mm7
pxor %mm1,%mm3
psllq $7,%mm1
pxor %mm7,%mm3
psrlq $1,%mm7
pxor %mm1,%mm3
movq %mm5,%mm1
psrlq $13,%mm5
pxor %mm3,%mm7
psllq $3,%mm6
pxor %mm5,%mm1
paddq 200(%esp),%mm7
pxor %mm6,%mm1
psrlq $42,%mm5
paddq 128(%esp),%mm7
pxor %mm5,%mm1
psllq $42,%mm6
movq 40(%esp),%mm5
pxor %mm6,%mm1
movq 48(%esp),%mm6
paddq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm6,%mm2
addl $8,%ebp
movq 88(%esp),%mm5
movq %mm7,%mm1
psrlq $1,%mm7
movq %mm5,%mm6
psrlq $6,%mm5
psllq $56,%mm1
paddq %mm3,%mm2
movq %mm7,%mm3
psrlq $6,%mm7
pxor %mm1,%mm3
psllq $7,%mm1
pxor %mm7,%mm3
psrlq $1,%mm7
pxor %mm1,%mm3
movq %mm5,%mm1
psrlq $13,%mm5
pxor %mm3,%mm7
psllq $3,%mm6
pxor %mm5,%mm1
paddq 200(%esp),%mm7
pxor %mm6,%mm1
psrlq $42,%mm5
paddq 128(%esp),%mm7
pxor %mm5,%mm1
psllq $42,%mm6
movq 40(%esp),%mm5
pxor %mm6,%mm1
movq 48(%esp),%mm6
paddq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm6,%mm0
addl $8,%ebp
decl %edx
jnz L00616_79_sse2
paddq %mm3,%mm0
movq 8(%esp),%mm1
movq 24(%esp),%mm3
movq 40(%esp),%mm5
movq 48(%esp),%mm6
movq 56(%esp),%mm7
pxor %mm1,%mm2
paddq (%esi),%mm0
paddq 8(%esi),%mm1
paddq 16(%esi),%mm2
paddq 24(%esi),%mm3
paddq 32(%esi),%mm4
paddq 40(%esi),%mm5
paddq 48(%esi),%mm6
paddq 56(%esi),%mm7
movl $640,%eax
movq %mm0,(%esi)
movq %mm1,8(%esi)
movq %mm2,16(%esi)
movq %mm3,24(%esi)
movq %mm4,32(%esi)
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
leal (%esp,%eax,1),%esp
subl %eax,%ebp
cmpl 88(%esp),%edi
jb L004loop_sse2
movl 92(%esp),%esp
emms
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 5,0x90
L003SSSE3:
leal -64(%esp),%edx
subl $256,%esp
movdqa 640(%ebp),%xmm1
movdqu (%edi),%xmm0
.byte 102,15,56,0,193
movdqa (%ebp),%xmm3
movdqa %xmm1,%xmm2
movdqu 16(%edi),%xmm1
paddq %xmm0,%xmm3
.byte 102,15,56,0,202
movdqa %xmm3,-128(%edx)
movdqa 16(%ebp),%xmm4
movdqa %xmm2,%xmm3
movdqu 32(%edi),%xmm2
paddq %xmm1,%xmm4
.byte 102,15,56,0,211
movdqa %xmm4,-112(%edx)
movdqa 32(%ebp),%xmm5
movdqa %xmm3,%xmm4
movdqu 48(%edi),%xmm3
paddq %xmm2,%xmm5
.byte 102,15,56,0,220
movdqa %xmm5,-96(%edx)
movdqa 48(%ebp),%xmm6
movdqa %xmm4,%xmm5
movdqu 64(%edi),%xmm4
paddq %xmm3,%xmm6
.byte 102,15,56,0,229
movdqa %xmm6,-80(%edx)
movdqa 64(%ebp),%xmm7
movdqa %xmm5,%xmm6
movdqu 80(%edi),%xmm5
paddq %xmm4,%xmm7
.byte 102,15,56,0,238
movdqa %xmm7,-64(%edx)
movdqa %xmm0,(%edx)
movdqa 80(%ebp),%xmm0
movdqa %xmm6,%xmm7
movdqu 96(%edi),%xmm6
paddq %xmm5,%xmm0
.byte 102,15,56,0,247
movdqa %xmm0,-48(%edx)
movdqa %xmm1,16(%edx)
movdqa 96(%ebp),%xmm1
movdqa %xmm7,%xmm0
movdqu 112(%edi),%xmm7
paddq %xmm6,%xmm1
.byte 102,15,56,0,248
movdqa %xmm1,-32(%edx)
movdqa %xmm2,32(%edx)
movdqa 112(%ebp),%xmm2
movdqa (%edx),%xmm0
paddq %xmm7,%xmm2
movdqa %xmm2,-16(%edx)
nop
.align 5,0x90
L007loop_ssse3:
movdqa 16(%edx),%xmm2
movdqa %xmm3,48(%edx)
leal 128(%ebp),%ebp
movq %mm1,8(%esp)
movl %edi,%ebx
movq %mm2,16(%esp)
leal 128(%edi),%edi
movq %mm3,24(%esp)
cmpl %eax,%edi
movq %mm5,40(%esp)
cmovbl %edi,%ebx
movq %mm6,48(%esp)
movl $4,%ecx
pxor %mm1,%mm2
movq %mm7,56(%esp)
pxor %mm3,%mm3
jmp L00800_47_ssse3
.align 5,0x90
L00800_47_ssse3:
movdqa %xmm5,%xmm3
movdqa %xmm2,%xmm1
.byte 102,15,58,15,208,8
movdqa %xmm4,(%edx)
.byte 102,15,58,15,220,8
movdqa %xmm2,%xmm4
psrlq $7,%xmm2
paddq %xmm3,%xmm0
movdqa %xmm4,%xmm3
psrlq $1,%xmm4
psllq $56,%xmm3
pxor %xmm4,%xmm2
psrlq $7,%xmm4
pxor %xmm3,%xmm2
psllq $7,%xmm3
pxor %xmm4,%xmm2
movdqa %xmm7,%xmm4
pxor %xmm3,%xmm2
movdqa %xmm7,%xmm3
psrlq $6,%xmm4
paddq %xmm2,%xmm0
movdqa %xmm7,%xmm2
psrlq $19,%xmm3
psllq $3,%xmm2
pxor %xmm3,%xmm4
psrlq $42,%xmm3
pxor %xmm2,%xmm4
psllq $42,%xmm2
pxor %xmm3,%xmm4
movdqa 32(%edx),%xmm3
pxor %xmm2,%xmm4
movdqa (%ebp),%xmm2
movq %mm4,%mm1
paddq %xmm4,%xmm0
movq -128(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
paddq %xmm0,%xmm2
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -120(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm2,-128(%edx)
movdqa %xmm6,%xmm4
movdqa %xmm3,%xmm2
.byte 102,15,58,15,217,8
movdqa %xmm5,16(%edx)
.byte 102,15,58,15,229,8
movdqa %xmm3,%xmm5
psrlq $7,%xmm3
paddq %xmm4,%xmm1
movdqa %xmm5,%xmm4
psrlq $1,%xmm5
psllq $56,%xmm4
pxor %xmm5,%xmm3
psrlq $7,%xmm5
pxor %xmm4,%xmm3
psllq $7,%xmm4
pxor %xmm5,%xmm3
movdqa %xmm0,%xmm5
pxor %xmm4,%xmm3
movdqa %xmm0,%xmm4
psrlq $6,%xmm5
paddq %xmm3,%xmm1
movdqa %xmm0,%xmm3
psrlq $19,%xmm4
psllq $3,%xmm3
pxor %xmm4,%xmm5
psrlq $42,%xmm4
pxor %xmm3,%xmm5
psllq $42,%xmm3
pxor %xmm4,%xmm5
movdqa 48(%edx),%xmm4
pxor %xmm3,%xmm5
movdqa 16(%ebp),%xmm3
movq %mm4,%mm1
paddq %xmm5,%xmm1
movq -112(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
paddq %xmm1,%xmm3
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -104(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm3,-112(%edx)
movdqa %xmm7,%xmm5
movdqa %xmm4,%xmm3
.byte 102,15,58,15,226,8
movdqa %xmm6,32(%edx)
.byte 102,15,58,15,238,8
movdqa %xmm4,%xmm6
psrlq $7,%xmm4
paddq %xmm5,%xmm2
movdqa %xmm6,%xmm5
psrlq $1,%xmm6
psllq $56,%xmm5
pxor %xmm6,%xmm4
psrlq $7,%xmm6
pxor %xmm5,%xmm4
psllq $7,%xmm5
pxor %xmm6,%xmm4
movdqa %xmm1,%xmm6
pxor %xmm5,%xmm4
movdqa %xmm1,%xmm5
psrlq $6,%xmm6
paddq %xmm4,%xmm2
movdqa %xmm1,%xmm4
psrlq $19,%xmm5
psllq $3,%xmm4
pxor %xmm5,%xmm6
psrlq $42,%xmm5
pxor %xmm4,%xmm6
psllq $42,%xmm4
pxor %xmm5,%xmm6
movdqa (%edx),%xmm5
pxor %xmm4,%xmm6
movdqa 32(%ebp),%xmm4
movq %mm4,%mm1
paddq %xmm6,%xmm2
movq -96(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
paddq %xmm2,%xmm4
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -88(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm4,-96(%edx)
movdqa %xmm0,%xmm6
movdqa %xmm5,%xmm4
.byte 102,15,58,15,235,8
movdqa %xmm7,48(%edx)
.byte 102,15,58,15,247,8
movdqa %xmm5,%xmm7
psrlq $7,%xmm5
paddq %xmm6,%xmm3
movdqa %xmm7,%xmm6
psrlq $1,%xmm7
psllq $56,%xmm6
pxor %xmm7,%xmm5
psrlq $7,%xmm7
pxor %xmm6,%xmm5
psllq $7,%xmm6
pxor %xmm7,%xmm5
movdqa %xmm2,%xmm7
pxor %xmm6,%xmm5
movdqa %xmm2,%xmm6
psrlq $6,%xmm7
paddq %xmm5,%xmm3
movdqa %xmm2,%xmm5
psrlq $19,%xmm6
psllq $3,%xmm5
pxor %xmm6,%xmm7
psrlq $42,%xmm6
pxor %xmm5,%xmm7
psllq $42,%xmm5
pxor %xmm6,%xmm7
movdqa 16(%edx),%xmm6
pxor %xmm5,%xmm7
movdqa 48(%ebp),%xmm5
movq %mm4,%mm1
paddq %xmm7,%xmm3
movq -80(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
paddq %xmm3,%xmm5
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -72(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm5,-80(%edx)
movdqa %xmm1,%xmm7
movdqa %xmm6,%xmm5
.byte 102,15,58,15,244,8
movdqa %xmm0,(%edx)
.byte 102,15,58,15,248,8
movdqa %xmm6,%xmm0
psrlq $7,%xmm6
paddq %xmm7,%xmm4
movdqa %xmm0,%xmm7
psrlq $1,%xmm0
psllq $56,%xmm7
pxor %xmm0,%xmm6
psrlq $7,%xmm0
pxor %xmm7,%xmm6
psllq $7,%xmm7
pxor %xmm0,%xmm6
movdqa %xmm3,%xmm0
pxor %xmm7,%xmm6
movdqa %xmm3,%xmm7
psrlq $6,%xmm0
paddq %xmm6,%xmm4
movdqa %xmm3,%xmm6
psrlq $19,%xmm7
psllq $3,%xmm6
pxor %xmm7,%xmm0
psrlq $42,%xmm7
pxor %xmm6,%xmm0
psllq $42,%xmm6
pxor %xmm7,%xmm0
movdqa 32(%edx),%xmm7
pxor %xmm6,%xmm0
movdqa 64(%ebp),%xmm6
movq %mm4,%mm1
paddq %xmm0,%xmm4
movq -64(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
paddq %xmm4,%xmm6
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -56(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm6,-64(%edx)
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm6
.byte 102,15,58,15,253,8
movdqa %xmm1,16(%edx)
.byte 102,15,58,15,193,8
movdqa %xmm7,%xmm1
psrlq $7,%xmm7
paddq %xmm0,%xmm5
movdqa %xmm1,%xmm0
psrlq $1,%xmm1
psllq $56,%xmm0
pxor %xmm1,%xmm7
psrlq $7,%xmm1
pxor %xmm0,%xmm7
psllq $7,%xmm0
pxor %xmm1,%xmm7
movdqa %xmm4,%xmm1
pxor %xmm0,%xmm7
movdqa %xmm4,%xmm0
psrlq $6,%xmm1
paddq %xmm7,%xmm5
movdqa %xmm4,%xmm7
psrlq $19,%xmm0
psllq $3,%xmm7
pxor %xmm0,%xmm1
psrlq $42,%xmm0
pxor %xmm7,%xmm1
psllq $42,%xmm7
pxor %xmm0,%xmm1
movdqa 48(%edx),%xmm0
pxor %xmm7,%xmm1
movdqa 80(%ebp),%xmm7
movq %mm4,%mm1
paddq %xmm1,%xmm5
movq -48(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
paddq %xmm5,%xmm7
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -40(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm7,-48(%edx)
movdqa %xmm3,%xmm1
movdqa %xmm0,%xmm7
.byte 102,15,58,15,198,8
movdqa %xmm2,32(%edx)
.byte 102,15,58,15,202,8
movdqa %xmm0,%xmm2
psrlq $7,%xmm0
paddq %xmm1,%xmm6
movdqa %xmm2,%xmm1
psrlq $1,%xmm2
psllq $56,%xmm1
pxor %xmm2,%xmm0
psrlq $7,%xmm2
pxor %xmm1,%xmm0
psllq $7,%xmm1
pxor %xmm2,%xmm0
movdqa %xmm5,%xmm2
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm1
psrlq $6,%xmm2
paddq %xmm0,%xmm6
movdqa %xmm5,%xmm0
psrlq $19,%xmm1
psllq $3,%xmm0
pxor %xmm1,%xmm2
psrlq $42,%xmm1
pxor %xmm0,%xmm2
psllq $42,%xmm0
pxor %xmm1,%xmm2
movdqa (%edx),%xmm1
pxor %xmm0,%xmm2
movdqa 96(%ebp),%xmm0
movq %mm4,%mm1
paddq %xmm2,%xmm6
movq -32(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
paddq %xmm6,%xmm0
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -24(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm0,-32(%edx)
movdqa %xmm4,%xmm2
movdqa %xmm1,%xmm0
.byte 102,15,58,15,207,8
movdqa %xmm3,48(%edx)
.byte 102,15,58,15,211,8
movdqa %xmm1,%xmm3
psrlq $7,%xmm1
paddq %xmm2,%xmm7
movdqa %xmm3,%xmm2
psrlq $1,%xmm3
psllq $56,%xmm2
pxor %xmm3,%xmm1
psrlq $7,%xmm3
pxor %xmm2,%xmm1
psllq $7,%xmm2
pxor %xmm3,%xmm1
movdqa %xmm6,%xmm3
pxor %xmm2,%xmm1
movdqa %xmm6,%xmm2
psrlq $6,%xmm3
paddq %xmm1,%xmm7
movdqa %xmm6,%xmm1
psrlq $19,%xmm2
psllq $3,%xmm1
pxor %xmm2,%xmm3
psrlq $42,%xmm2
pxor %xmm1,%xmm3
psllq $42,%xmm1
pxor %xmm2,%xmm3
movdqa 16(%edx),%xmm2
pxor %xmm1,%xmm3
movdqa 112(%ebp),%xmm1
movq %mm4,%mm1
paddq %xmm3,%xmm7
movq -16(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
paddq %xmm7,%xmm1
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -8(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm1,-16(%edx)
leal 128(%ebp),%ebp
decl %ecx
jnz L00800_47_ssse3
movdqa (%ebp),%xmm1
leal -640(%ebp),%ebp
movdqu (%ebx),%xmm0
.byte 102,15,56,0,193
movdqa (%ebp),%xmm3
movdqa %xmm1,%xmm2
movdqu 16(%ebx),%xmm1
paddq %xmm0,%xmm3
.byte 102,15,56,0,202
movq %mm4,%mm1
movq -128(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -120(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm3,-128(%edx)
movdqa 16(%ebp),%xmm4
movdqa %xmm2,%xmm3
movdqu 32(%ebx),%xmm2
paddq %xmm1,%xmm4
.byte 102,15,56,0,211
movq %mm4,%mm1
movq -112(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -104(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm4,-112(%edx)
movdqa 32(%ebp),%xmm5
movdqa %xmm3,%xmm4
movdqu 48(%ebx),%xmm3
paddq %xmm2,%xmm5
.byte 102,15,56,0,220
movq %mm4,%mm1
movq -96(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -88(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm5,-96(%edx)
movdqa 48(%ebp),%xmm6
movdqa %xmm4,%xmm5
movdqu 64(%ebx),%xmm4
paddq %xmm3,%xmm6
.byte 102,15,56,0,229
movq %mm4,%mm1
movq -80(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -72(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm6,-80(%edx)
movdqa 64(%ebp),%xmm7
movdqa %xmm5,%xmm6
movdqu 80(%ebx),%xmm5
paddq %xmm4,%xmm7
.byte 102,15,56,0,238
movq %mm4,%mm1
movq -64(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -56(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm7,-64(%edx)
movdqa %xmm0,(%edx)
movdqa 80(%ebp),%xmm0
movdqa %xmm6,%xmm7
movdqu 96(%ebx),%xmm6
paddq %xmm5,%xmm0
.byte 102,15,56,0,247
movq %mm4,%mm1
movq -48(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -40(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm0,-48(%edx)
movdqa %xmm1,16(%edx)
movdqa 96(%ebp),%xmm1
movdqa %xmm7,%xmm0
movdqu 112(%ebx),%xmm7
paddq %xmm6,%xmm1
.byte 102,15,56,0,248
movq %mm4,%mm1
movq -32(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -24(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm1,-32(%edx)
movdqa %xmm2,32(%edx)
movdqa 112(%ebp),%xmm2
movdqa (%edx),%xmm0
paddq %xmm7,%xmm2
movq %mm4,%mm1
movq -16(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -8(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm2,-16(%edx)
movq 8(%esp),%mm1
paddq %mm3,%mm0
movq 24(%esp),%mm3
movq 56(%esp),%mm7
pxor %mm1,%mm2
paddq (%esi),%mm0
paddq 8(%esi),%mm1
paddq 16(%esi),%mm2
paddq 24(%esi),%mm3
paddq 32(%esi),%mm4
paddq 40(%esi),%mm5
paddq 48(%esi),%mm6
paddq 56(%esi),%mm7
movq %mm0,(%esi)
movq %mm1,8(%esi)
movq %mm2,16(%esi)
movq %mm3,24(%esi)
movq %mm4,32(%esi)
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
cmpl %eax,%edi
jb L007loop_ssse3
movl 76(%edx),%esp
emms
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 4,0x90
L002loop_x86:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
movl 28(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
movl 44(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
movl 60(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 64(%edi),%eax
movl 68(%edi),%ebx
movl 72(%edi),%ecx
movl 76(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 80(%edi),%eax
movl 84(%edi),%ebx
movl 88(%edi),%ecx
movl 92(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 96(%edi),%eax
movl 100(%edi),%ebx
movl 104(%edi),%ecx
movl 108(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 112(%edi),%eax
movl 116(%edi),%ebx
movl 120(%edi),%ecx
movl 124(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
addl $128,%edi
subl $72,%esp
movl %edi,204(%esp)
leal 8(%esp),%edi
movl $16,%ecx
.long 2784229001
.align 4,0x90
L00900_15_x86:
movl 40(%esp),%ecx
movl 44(%esp),%edx
movl %ecx,%esi
shrl $9,%ecx
movl %edx,%edi
shrl $9,%edx
movl %ecx,%ebx
shll $14,%esi
movl %edx,%eax
shll $14,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%eax
shll $4,%esi
xorl %edx,%ebx
shll $4,%edi
xorl %esi,%ebx
shrl $4,%ecx
xorl %edi,%eax
shrl $4,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 48(%esp),%ecx
movl 52(%esp),%edx
movl 56(%esp),%esi
movl 60(%esp),%edi
addl 64(%esp),%eax
adcl 68(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
andl 40(%esp),%ecx
andl 44(%esp),%edx
addl 192(%esp),%eax
adcl 196(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
movl (%ebp),%esi
movl 4(%ebp),%edi
addl %ecx,%eax
adcl %edx,%ebx
movl 32(%esp),%ecx
movl 36(%esp),%edx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
addl %ecx,%eax
adcl %edx,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,%esi
shrl $2,%ecx
movl %edx,%edi
shrl $2,%edx
movl %ecx,%ebx
shll $4,%esi
movl %edx,%eax
shll $4,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%ebx
shll $21,%esi
xorl %edx,%eax
shll $21,%edi
xorl %esi,%eax
shrl $21,%ecx
xorl %edi,%ebx
shrl $21,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl 16(%esp),%esi
movl 20(%esp),%edi
addl (%esp),%eax
adcl 4(%esp),%ebx
orl %esi,%ecx
orl %edi,%edx
andl 24(%esp),%ecx
andl 28(%esp),%edx
andl 8(%esp),%esi
andl 12(%esp),%edi
orl %esi,%ecx
orl %edi,%edx
addl %ecx,%eax
adcl %edx,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
movb (%ebp),%dl
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $148,%dl
jne L00900_15_x86
.align 4,0x90
L01016_79_x86:
movl 312(%esp),%ecx
movl 316(%esp),%edx
movl %ecx,%esi
shrl $1,%ecx
movl %edx,%edi
shrl $1,%edx
movl %ecx,%eax
shll $24,%esi
movl %edx,%ebx
shll $24,%edi
xorl %esi,%ebx
shrl $6,%ecx
xorl %edi,%eax
shrl $6,%edx
xorl %ecx,%eax
shll $7,%esi
xorl %edx,%ebx
shll $1,%edi
xorl %esi,%ebx
shrl $1,%ecx
xorl %edi,%eax
shrl $1,%edx
xorl %ecx,%eax
shll $6,%edi
xorl %edx,%ebx
xorl %edi,%eax
movl %eax,(%esp)
movl %ebx,4(%esp)
movl 208(%esp),%ecx
movl 212(%esp),%edx
movl %ecx,%esi
shrl $6,%ecx
movl %edx,%edi
shrl $6,%edx
movl %ecx,%eax
shll $3,%esi
movl %edx,%ebx
shll $3,%edi
xorl %esi,%eax
shrl $13,%ecx
xorl %edi,%ebx
shrl $13,%edx
xorl %ecx,%eax
shll $10,%esi
xorl %edx,%ebx
shll $10,%edi
xorl %esi,%ebx
shrl $10,%ecx
xorl %edi,%eax
shrl $10,%edx
xorl %ecx,%ebx
shll $13,%edi
xorl %edx,%eax
xorl %edi,%eax
movl 320(%esp),%ecx
movl 324(%esp),%edx
addl (%esp),%eax
adcl 4(%esp),%ebx
movl 248(%esp),%esi
movl 252(%esp),%edi
addl %ecx,%eax
adcl %edx,%ebx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,192(%esp)
movl %ebx,196(%esp)
movl 40(%esp),%ecx
movl 44(%esp),%edx
movl %ecx,%esi
shrl $9,%ecx
movl %edx,%edi
shrl $9,%edx
movl %ecx,%ebx
shll $14,%esi
movl %edx,%eax
shll $14,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%eax
shll $4,%esi
xorl %edx,%ebx
shll $4,%edi
xorl %esi,%ebx
shrl $4,%ecx
xorl %edi,%eax
shrl $4,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 48(%esp),%ecx
movl 52(%esp),%edx
movl 56(%esp),%esi
movl 60(%esp),%edi
addl 64(%esp),%eax
adcl 68(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
andl 40(%esp),%ecx
andl 44(%esp),%edx
addl 192(%esp),%eax
adcl 196(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
movl (%ebp),%esi
movl 4(%ebp),%edi
addl %ecx,%eax
adcl %edx,%ebx
movl 32(%esp),%ecx
movl 36(%esp),%edx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
addl %ecx,%eax
adcl %edx,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,%esi
shrl $2,%ecx
movl %edx,%edi
shrl $2,%edx
movl %ecx,%ebx
shll $4,%esi
movl %edx,%eax
shll $4,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%ebx
shll $21,%esi
xorl %edx,%eax
shll $21,%edi
xorl %esi,%eax
shrl $21,%ecx
xorl %edi,%ebx
shrl $21,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl 16(%esp),%esi
movl 20(%esp),%edi
addl (%esp),%eax
adcl 4(%esp),%ebx
orl %esi,%ecx
orl %edi,%edx
andl 24(%esp),%ecx
andl 28(%esp),%edx
andl 8(%esp),%esi
andl 12(%esp),%edi
orl %esi,%ecx
orl %edi,%edx
addl %ecx,%eax
adcl %edx,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
movb (%ebp),%dl
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $23,%dl
jne L01016_79_x86
movl 840(%esp),%esi
movl 844(%esp),%edi
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
addl 8(%esp),%eax
adcl 12(%esp),%ebx
movl %eax,(%esi)
movl %ebx,4(%esi)
addl 16(%esp),%ecx
adcl 20(%esp),%edx
movl %ecx,8(%esi)
movl %edx,12(%esi)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
addl 24(%esp),%eax
adcl 28(%esp),%ebx
movl %eax,16(%esi)
movl %ebx,20(%esi)
addl 32(%esp),%ecx
adcl 36(%esp),%edx
movl %ecx,24(%esi)
movl %edx,28(%esi)
movl 32(%esi),%eax
movl 36(%esi),%ebx
movl 40(%esi),%ecx
movl 44(%esi),%edx
addl 40(%esp),%eax
adcl 44(%esp),%ebx
movl %eax,32(%esi)
movl %ebx,36(%esi)
addl 48(%esp),%ecx
adcl 52(%esp),%edx
movl %ecx,40(%esi)
movl %edx,44(%esi)
movl 48(%esi),%eax
movl 52(%esi),%ebx
movl 56(%esi),%ecx
movl 60(%esi),%edx
addl 56(%esp),%eax
adcl 60(%esp),%ebx
movl %eax,48(%esi)
movl %ebx,52(%esi)
addl 64(%esp),%ecx
adcl 68(%esp),%edx
movl %ecx,56(%esi)
movl %edx,60(%esi)
addl $840,%esp
subl $640,%ebp
cmpl 8(%esp),%edi
jb L002loop_x86
movl 12(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 6,0x90
L001K512:
.long 3609767458,1116352408
.long 602891725,1899447441
.long 3964484399,3049323471
.long 2173295548,3921009573
.long 4081628472,961987163
.long 3053834265,1508970993
.long 2937671579,2453635748
.long 3664609560,2870763221
.long 2734883394,3624381080
.long 1164996542,310598401
.long 1323610764,607225278
.long 3590304994,1426881987
.long 4068182383,1925078388
.long 991336113,2162078206
.long 633803317,2614888103
.long 3479774868,3248222580
.long 2666613458,3835390401
.long 944711139,4022224774
.long 2341262773,264347078
.long 2007800933,604807628
.long 1495990901,770255983
.long 1856431235,1249150122
.long 3175218132,1555081692
.long 2198950837,1996064986
.long 3999719339,2554220882
.long 766784016,2821834349
.long 2566594879,2952996808
.long 3203337956,3210313671
.long 1034457026,3336571891
.long 2466948901,3584528711
.long 3758326383,113926993
.long 168717936,338241895
.long 1188179964,666307205
.long 1546045734,773529912
.long 1522805485,1294757372
.long 2643833823,1396182291
.long 2343527390,1695183700
.long 1014477480,1986661051
.long 1206759142,2177026350
.long 344077627,2456956037
.long 1290863460,2730485921
.long 3158454273,2820302411
.long 3505952657,3259730800
.long 106217008,3345764771
.long 3606008344,3516065817
.long 1432725776,3600352804
.long 1467031594,4094571909
.long 851169720,275423344
.long 3100823752,430227734
.long 1363258195,506948616
.long 3750685593,659060556
.long 3785050280,883997877
.long 3318307427,958139571
.long 3812723403,1322822218
.long 2003034995,1537002063
.long 3602036899,1747873779
.long 1575990012,1955562222
.long 1125592928,2024104815
.long 2716904306,2227730452
.long 442776044,2361852424
.long 593698344,2428436474
.long 3733110249,2756734187
.long 2999351573,3204031479
.long 3815920427,3329325298
.long 3928383900,3391569614
.long 566280711,3515267271
.long 3454069534,3940187606
.long 4000239992,4118630271
.long 1914138554,116418474
.long 2731055270,174292421
.long 3203993006,289380356
.long 320620315,460393269
.long 587496836,685471733
.long 1086792851,852142971
.long 365543100,1017036298
.long 2618297676,1126000580
.long 3409855158,1288033470
.long 4234509866,1501505948
.long 987167468,1607167915
.long 1246189591,1816402316
.long 67438087,66051
.long 202182159,134810123
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
.section __IMPORT,__pointers,non_lazy_symbol_pointers
L_OPENSSL_ia32cap_P$non_lazy_ptr:
.indirect_symbol _OPENSSL_ia32cap_P
.long 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/fipsmodule/ghash-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
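// gcm_init_clmul(Htable, H): expand the 128-bit GHASH key H (second argument,
// loaded from 8(%esp)) into the PCLMULQDQ lookup table at Htable (first
// argument, 4(%esp)). Judging by the stores at the end of the routine, the
// table holds the processed key, its square, and a packed Karatsuba pre-xor of
// their halves at offsets 0, 16 and 32.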
.globl _gcm_init_clmul
.private_extern _gcm_init_clmul
.align 4
_gcm_init_clmul:
L_gcm_init_clmul_begin:
movl 4(%esp),%edx
movl 8(%esp),%eax
call L000pic
L000pic:
popl %ecx
leal Lbswap-L000pic(%ecx),%ecx
movdqu (%eax),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand 16(%ecx),%xmm5
pxor %xmm5,%xmm2
movdqa %xmm2,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,(%edx)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%edx)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%edx)
ret
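// gcm_gmult_clmul(Xi, Htable): multiply the running GHASH state Xi (4(%esp))
// by the key material in Htable (8(%esp)) in GF(2^128) using PCLMULQDQ with a
// Karatsuba split and shift-based reduction, byte-swapping on load and store.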
.globl _gcm_gmult_clmul
.private_extern _gcm_gmult_clmul
.align 4
_gcm_gmult_clmul:
L_gcm_gmult_clmul_begin:
movl 4(%esp),%eax
movl 8(%esp),%edx
call L001pic
L001pic:
popl %ecx
leal Lbswap-L001pic(%ecx),%ecx
movdqu (%eax),%xmm0
movdqa (%ecx),%xmm5
movups (%edx),%xmm2
.byte 102,15,56,0,197
movups 32(%edx),%xmm4
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
ret
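// gcm_ghash_clmul(Xi, Htable, inp, len): fold len bytes of input into the
// GHASH state Xi (arguments at 20(%esp)..32(%esp) after the four pushes).
// The main loop consumes two 16-byte blocks per iteration and drops to a
// single-block tail when fewer than 32 bytes remain.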
.globl _gcm_ghash_clmul
.private_extern _gcm_ghash_clmul
.align 4
_gcm_ghash_clmul:
L_gcm_ghash_clmul_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 28(%esp),%esi
movl 32(%esp),%ebx
call L002pic
L002pic:
popl %ecx
leal Lbswap-L002pic(%ecx),%ecx
movdqu (%eax),%xmm0
movdqa (%ecx),%xmm5
movdqu (%edx),%xmm2
.byte 102,15,56,0,197
subl $16,%ebx
jz L003odd_tail
movdqu (%esi),%xmm3
movdqu 16(%esi),%xmm6
.byte 102,15,56,0,221
.byte 102,15,56,0,245
movdqu 32(%edx),%xmm5
pxor %xmm3,%xmm0
pshufd $78,%xmm6,%xmm3
movdqa %xmm6,%xmm7
pxor %xmm6,%xmm3
leal 32(%esi),%esi
.byte 102,15,58,68,242,0
.byte 102,15,58,68,250,17
.byte 102,15,58,68,221,0
movups 16(%edx),%xmm2
nop
subl $32,%ebx
jbe L004even_tail
jmp L005mod_loop
.align 5,0x90
L005mod_loop:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
nop
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movups (%edx),%xmm2
xorps %xmm6,%xmm0
movdqa (%ecx),%xmm5
xorps %xmm7,%xmm1
movdqu (%esi),%xmm7
pxor %xmm0,%xmm3
movdqu 16(%esi),%xmm6
pxor %xmm1,%xmm3
.byte 102,15,56,0,253
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
.byte 102,15,56,0,245
pxor %xmm7,%xmm1
movdqa %xmm6,%xmm7
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
.byte 102,15,58,68,242,0
movups 32(%edx),%xmm5
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
pshufd $78,%xmm7,%xmm3
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm7,%xmm3
pxor %xmm4,%xmm1
.byte 102,15,58,68,250,17
movups 16(%edx),%xmm2
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.byte 102,15,58,68,221,0
leal 32(%esi),%esi
subl $32,%ebx
ja L005mod_loop
L004even_tail:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movdqa (%ecx),%xmm5
xorps %xmm6,%xmm0
xorps %xmm7,%xmm1
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testl %ebx,%ebx
jnz L006done
movups (%edx),%xmm2
L003odd_tail:
movdqu (%esi),%xmm3
.byte 102,15,56,0,221
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
L006done:
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 6,0x90
Lbswap:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67
.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112
.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62
.byte 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/mac-x86/crypto/chacha/chacha-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
.text
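// ChaCha20_ctr32_nohw(out, in, in_len, key, counter): scalar ChaCha20 block
// function for 32-bit x86; argument names follow the usual ChaCha20_ctr32
// prototype and match the 20(%esp)..36(%esp) loads below. Each outer-loop
// iteration produces one 64-byte keystream block and XORs it with the input,
// with a byte-wise tail loop for partial blocks.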
.globl _ChaCha20_ctr32_nohw
.private_extern _ChaCha20_ctr32_nohw
.align 4
_ChaCha20_ctr32_nohw:
L_ChaCha20_ctr32_nohw_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 32(%esp),%esi
movl 36(%esp),%edi
subl $132,%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
movl %eax,80(%esp)
movl %ebx,84(%esp)
movl %ecx,88(%esp)
movl %edx,92(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
movl %eax,96(%esp)
movl %ebx,100(%esp)
movl %ecx,104(%esp)
movl %edx,108(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
subl $1,%eax
movl %eax,112(%esp)
movl %ebx,116(%esp)
movl %ecx,120(%esp)
movl %edx,124(%esp)
jmp L000entry
.align 4,0x90
L001outer_loop:
movl %ebx,156(%esp)
movl %eax,152(%esp)
movl %ecx,160(%esp)
L000entry:
movl $1634760805,%eax
movl $857760878,4(%esp)
movl $2036477234,8(%esp)
movl $1797285236,12(%esp)
movl 84(%esp),%ebx
movl 88(%esp),%ebp
movl 104(%esp),%ecx
movl 108(%esp),%esi
movl 116(%esp),%edx
movl 120(%esp),%edi
movl %ebx,20(%esp)
movl %ebp,24(%esp)
movl %ecx,40(%esp)
movl %esi,44(%esp)
movl %edx,52(%esp)
movl %edi,56(%esp)
movl 92(%esp),%ebx
movl 124(%esp),%edi
movl 112(%esp),%edx
movl 80(%esp),%ebp
movl 96(%esp),%ecx
movl 100(%esp),%esi
addl $1,%edx
movl %ebx,28(%esp)
movl %edi,60(%esp)
movl %edx,112(%esp)
movl $10,%ebx
jmp L002loop
.align 4,0x90
L002loop:
addl %ebp,%eax
movl %ebx,128(%esp)
movl %ebp,%ebx
xorl %eax,%edx
roll $16,%edx
addl %edx,%ecx
xorl %ecx,%ebx
movl 52(%esp),%edi
roll $12,%ebx
movl 20(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,48(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,32(%esp)
roll $16,%edi
movl %ebx,16(%esp)
addl %edi,%esi
movl 40(%esp),%ecx
xorl %esi,%ebp
movl 56(%esp),%edx
roll $12,%ebp
movl 24(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,52(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,36(%esp)
roll $16,%edx
movl %ebp,20(%esp)
addl %edx,%ecx
movl 44(%esp),%esi
xorl %ecx,%ebx
movl 60(%esp),%edi
roll $12,%ebx
movl 28(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,56(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,24(%esp)
addl %edi,%esi
xorl %esi,%ebp
roll $12,%ebp
movl 20(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,%edx
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
roll $16,%edx
movl %ebp,28(%esp)
addl %edx,%ecx
xorl %ecx,%ebx
movl 48(%esp),%edi
roll $12,%ebx
movl 24(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,60(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,40(%esp)
roll $16,%edi
movl %ebx,20(%esp)
addl %edi,%esi
movl 32(%esp),%ecx
xorl %esi,%ebp
movl 52(%esp),%edx
roll $12,%ebp
movl 28(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,48(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,44(%esp)
roll $16,%edx
movl %ebp,24(%esp)
addl %edx,%ecx
movl 36(%esp),%esi
xorl %ecx,%ebx
movl 56(%esp),%edi
roll $12,%ebx
movl 16(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,52(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,28(%esp)
addl %edi,%esi
xorl %esi,%ebp
movl 48(%esp),%edx
roll $12,%ebp
movl 128(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,56(%esp)
xorl %esi,%ebp
roll $7,%ebp
decl %ebx
jnz L002loop
movl 160(%esp),%ebx
addl $1634760805,%eax
addl 80(%esp),%ebp
addl 96(%esp),%ecx
addl 100(%esp),%esi
cmpl $64,%ebx
jb L003tail
movl 156(%esp),%ebx
addl 112(%esp),%edx
addl 120(%esp),%edi
xorl (%ebx),%eax
xorl 16(%ebx),%ebp
movl %eax,(%esp)
movl 152(%esp),%eax
xorl 32(%ebx),%ecx
xorl 36(%ebx),%esi
xorl 48(%ebx),%edx
xorl 56(%ebx),%edi
movl %ebp,16(%eax)
movl %ecx,32(%eax)
movl %esi,36(%eax)
movl %edx,48(%eax)
movl %edi,56(%eax)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
xorl 4(%ebx),%ebp
xorl 8(%ebx),%ecx
xorl 12(%ebx),%esi
xorl 20(%ebx),%edx
xorl 24(%ebx),%edi
movl %ebp,4(%eax)
movl %ecx,8(%eax)
movl %esi,12(%eax)
movl %edx,20(%eax)
movl %edi,24(%eax)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
xorl 28(%ebx),%ebp
xorl 40(%ebx),%ecx
xorl 44(%ebx),%esi
xorl 52(%ebx),%edx
xorl 60(%ebx),%edi
leal 64(%ebx),%ebx
movl %ebp,28(%eax)
movl (%esp),%ebp
movl %ecx,40(%eax)
movl 160(%esp),%ecx
movl %esi,44(%eax)
movl %edx,52(%eax)
movl %edi,60(%eax)
movl %ebp,(%eax)
leal 64(%eax),%eax
subl $64,%ecx
jnz L001outer_loop
jmp L004done
L003tail:
addl 112(%esp),%edx
addl 120(%esp),%edi
movl %eax,(%esp)
movl %ebp,16(%esp)
movl %ecx,32(%esp)
movl %esi,36(%esp)
movl %edx,48(%esp)
movl %edi,56(%esp)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
movl %ebp,4(%esp)
movl %ecx,8(%esp)
movl %esi,12(%esp)
movl %edx,20(%esp)
movl %edi,24(%esp)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
movl %ebp,28(%esp)
movl 156(%esp),%ebp
movl %ecx,40(%esp)
movl 152(%esp),%ecx
movl %esi,44(%esp)
xorl %esi,%esi
movl %edx,52(%esp)
movl %edi,60(%esp)
xorl %eax,%eax
xorl %edx,%edx
L005tail_loop:
movb (%esi,%ebp,1),%al
movb (%esp,%esi,1),%dl
leal 1(%esi),%esi
xorb %dl,%al
movb %al,-1(%ecx,%esi,1)
decl %ebx
jnz L005tail_loop
L004done:
addl $132,%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
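// ChaCha20_ctr32_ssse3(out, in, in_len, key, counter): SSSE3 path. For inputs
// of 256 bytes or more it runs four interleaved blocks per outer-loop
// iteration; shorter inputs fall back to the one-block L0061x/L010loop1x path,
// again with a byte-wise tail loop for partial blocks.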
.globl _ChaCha20_ctr32_ssse3
.private_extern _ChaCha20_ctr32_ssse3
.align 4
_ChaCha20_ctr32_ssse3:
L_ChaCha20_ctr32_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call Lpic_point
Lpic_point:
popl %eax
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%ecx
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $524,%esp
andl $-64,%esp
movl %ebp,512(%esp)
leal Lssse3_data-Lpic_point(%eax),%eax
movdqu (%ebx),%xmm3
cmpl $256,%ecx
jb L0061x
movl %edx,516(%esp)
movl %ebx,520(%esp)
subl $256,%ecx
leal 384(%esp),%ebp
movdqu (%edx),%xmm7
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
paddd 48(%eax),%xmm0
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
psubd 64(%eax),%xmm0
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,64(%ebp)
movdqa %xmm1,80(%ebp)
movdqa %xmm2,96(%ebp)
movdqa %xmm3,112(%ebp)
movdqu 16(%edx),%xmm3
movdqa %xmm4,-64(%ebp)
movdqa %xmm5,-48(%ebp)
movdqa %xmm6,-32(%ebp)
movdqa %xmm7,-16(%ebp)
movdqa 32(%eax),%xmm7
leal 128(%esp),%ebx
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,(%ebp)
movdqa %xmm1,16(%ebp)
movdqa %xmm2,32(%ebp)
movdqa %xmm3,48(%ebp)
movdqa %xmm4,-128(%ebp)
movdqa %xmm5,-112(%ebp)
movdqa %xmm6,-96(%ebp)
movdqa %xmm7,-80(%ebp)
leal 128(%esi),%esi
leal 128(%edi),%edi
jmp L007outer_loop
.align 4,0x90
L007outer_loop:
movdqa -112(%ebp),%xmm1
movdqa -96(%ebp),%xmm2
movdqa -80(%ebp),%xmm3
movdqa -48(%ebp),%xmm5
movdqa -32(%ebp),%xmm6
movdqa -16(%ebp),%xmm7
movdqa %xmm1,-112(%ebx)
movdqa %xmm2,-96(%ebx)
movdqa %xmm3,-80(%ebx)
movdqa %xmm5,-48(%ebx)
movdqa %xmm6,-32(%ebx)
movdqa %xmm7,-16(%ebx)
movdqa 32(%ebp),%xmm2
movdqa 48(%ebp),%xmm3
movdqa 64(%ebp),%xmm4
movdqa 80(%ebp),%xmm5
movdqa 96(%ebp),%xmm6
movdqa 112(%ebp),%xmm7
paddd 64(%eax),%xmm4
movdqa %xmm2,32(%ebx)
movdqa %xmm3,48(%ebx)
movdqa %xmm4,64(%ebx)
movdqa %xmm5,80(%ebx)
movdqa %xmm6,96(%ebx)
movdqa %xmm7,112(%ebx)
movdqa %xmm4,64(%ebp)
movdqa -128(%ebp),%xmm0
movdqa %xmm4,%xmm6
movdqa -64(%ebp),%xmm3
movdqa (%ebp),%xmm4
movdqa 16(%ebp),%xmm5
movl $10,%edx
nop
.align 4,0x90
L008loop:
paddd %xmm3,%xmm0
movdqa %xmm3,%xmm2
pxor %xmm0,%xmm6
pshufb (%eax),%xmm6
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -48(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 80(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,64(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-64(%ebx)
paddd %xmm7,%xmm5
movdqa 32(%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -32(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 96(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,80(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,16(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-48(%ebx)
paddd %xmm6,%xmm4
movdqa 48(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -16(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 112(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,96(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-32(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa -48(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,%xmm6
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
pshufb (%eax),%xmm6
movdqa %xmm3,-16(%ebx)
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -32(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 64(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,112(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,32(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-48(%ebx)
paddd %xmm7,%xmm5
movdqa (%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -16(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 80(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,64(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,48(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-32(%ebx)
paddd %xmm6,%xmm4
movdqa 16(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -64(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 96(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,80(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-16(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 64(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,96(%ebx)
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
por %xmm1,%xmm3
decl %edx
jnz L008loop
movdqa %xmm3,-64(%ebx)
movdqa %xmm4,(%ebx)
movdqa %xmm5,16(%ebx)
movdqa %xmm6,64(%ebx)
movdqa %xmm7,96(%ebx)
movdqa -112(%ebx),%xmm1
movdqa -96(%ebx),%xmm2
movdqa -80(%ebx),%xmm3
paddd -128(%ebp),%xmm0
paddd -112(%ebp),%xmm1
paddd -96(%ebp),%xmm2
paddd -80(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa -64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa -48(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa -32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa -16(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd -64(%ebp),%xmm0
paddd -48(%ebp),%xmm1
paddd -32(%ebp),%xmm2
paddd -16(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa (%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 16(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 48(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd (%ebp),%xmm0
paddd 16(%ebp),%xmm1
paddd 32(%ebp),%xmm2
paddd 48(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa 64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 80(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 96(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 112(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd 64(%ebp),%xmm0
paddd 80(%ebp),%xmm1
paddd 96(%ebp),%xmm2
paddd 112(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 208(%esi),%esi
pxor %xmm0,%xmm4
pxor %xmm1,%xmm5
pxor %xmm2,%xmm6
pxor %xmm3,%xmm7
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 208(%edi),%edi
subl $256,%ecx
jnc L007outer_loop
addl $256,%ecx
jz L009done
movl 520(%esp),%ebx
leal -128(%esi),%esi
movl 516(%esp),%edx
leal -128(%edi),%edi
movd 64(%ebp),%xmm2
movdqu (%ebx),%xmm3
paddd 96(%eax),%xmm2
pand 112(%eax),%xmm3
por %xmm2,%xmm3
L0061x:
movdqa 32(%eax),%xmm0
movdqu (%edx),%xmm1
movdqu 16(%edx),%xmm2
movdqa (%eax),%xmm6
movdqa 16(%eax),%xmm7
movl %ebp,48(%esp)
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
movl $10,%edx
jmp L010loop1x
.align 4,0x90
L011outer1x:
movdqa 80(%eax),%xmm3
movdqa (%esp),%xmm0
movdqa 16(%esp),%xmm1
movdqa 32(%esp),%xmm2
paddd 48(%esp),%xmm3
movl $10,%edx
movdqa %xmm3,48(%esp)
jmp L010loop1x
.align 4,0x90
L010loop1x:
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $57,%xmm1,%xmm1
pshufd $147,%xmm3,%xmm3
nop
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $147,%xmm1,%xmm1
pshufd $57,%xmm3,%xmm3
decl %edx
jnz L010loop1x
paddd (%esp),%xmm0
paddd 16(%esp),%xmm1
paddd 32(%esp),%xmm2
paddd 48(%esp),%xmm3
cmpl $64,%ecx
jb L012tail
movdqu (%esi),%xmm4
movdqu 16(%esi),%xmm5
pxor %xmm4,%xmm0
movdqu 32(%esi),%xmm4
pxor %xmm5,%xmm1
movdqu 48(%esi),%xmm5
pxor %xmm4,%xmm2
pxor %xmm5,%xmm3
leal 64(%esi),%esi
movdqu %xmm0,(%edi)
movdqu %xmm1,16(%edi)
movdqu %xmm2,32(%edi)
movdqu %xmm3,48(%edi)
leal 64(%edi),%edi
subl $64,%ecx
jnz L011outer1x
jmp L009done
L012tail:
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
xorl %eax,%eax
xorl %edx,%edx
xorl %ebp,%ebp
L013tail_loop:
movb (%esp,%ebp,1),%al
movb (%esi,%ebp,1),%dl
leal 1(%ebp),%ebp
xorb %dl,%al
movb %al,-1(%edi,%ebp,1)
decl %ecx
jnz L013tail_loop
L009done:
movl 512(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 6,0x90
Lssse3_data:
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
.long 1634760805,857760878,2036477234,1797285236
.long 0,1,2,3
.long 4,4,4,4
.long 1,0,0,0
.long 4,0,0,0
.long 0,-1,-1,-1
.align 6,0x90
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
.byte 114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__)
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/test/trampoline-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
.syntax unified
.text
@ abi_test_trampoline loads callee-saved registers from |state|, calls |func|
@ with |argv|, then saves the callee-saved registers into |state|. It returns
@ the result of |func|. The |unwind| argument is unused.
@ uint32_t abi_test_trampoline(void (*func)(...), CallerState *state,
@ const uint32_t *argv, size_t argc,
@ int unwind);
.globl _abi_test_trampoline
.private_extern _abi_test_trampoline
.align 4
_abi_test_trampoline:
@ Save parameters and all callee-saved registers. For convenience, we
@ save r9 on iOS even though it's volatile.
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
stmdb sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}
@ Reserve stack space for six (10-4) stack parameters, plus an extra 4
@ bytes to keep it 8-byte-aligned (see AAPCS, section 5.3).
sub sp, sp, #28
@ Every register in AAPCS is either non-volatile or a parameter (except
@ r9 on iOS), so this code, by the actual call, loses all its scratch
@ registers. First fill in stack parameters while there are registers
@ to spare.
cmp r3, #4
bls Lstack_args_done
mov r4, sp @ r4 is the output pointer.
add r5, r2, r3, lsl #2 @ Set r5 to the end of argv.
add r2, r2, #16 @ Skip four arguments.
Lstack_args_loop:
ldr r6, [r2], #4
cmp r2, r5
str r6, [r4], #4
bne Lstack_args_loop
Lstack_args_done:
@ Load registers from |r1|.
vldmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15}
#if defined(__APPLE__)
@ r9 is not volatile on iOS.
ldmia r1!, {r4,r5,r6,r7,r8,r10-r11}
#else
ldmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11}
#endif
@ Load register parameters. This uses up our remaining registers, so we
@ repurpose lr as scratch space.
ldr r3, [sp, #40] @ Reload argc.
ldr lr, [sp, #36] @ Load argv into lr.
cmp r3, #3
bhi Larg_r3
beq Larg_r2
cmp r3, #1
bhi Larg_r1
beq Larg_r0
b Largs_done
Larg_r3:
ldr r3, [lr, #12] @ argv[3]
Larg_r2:
ldr r2, [lr, #8] @ argv[2]
Larg_r1:
ldr r1, [lr, #4] @ argv[1]
Larg_r0:
ldr r0, [lr] @ argv[0]
Largs_done:
@ With every other register in use, load the function pointer into lr
@ and call the function.
ldr lr, [sp, #28]
blx lr
@ r1-r3 are free for use again. The trampoline only supports
@ single-return functions. Pass r4-r11 to the caller.
ldr r1, [sp, #32]
vstmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15}
#if defined(__APPLE__)
@ r9 is volatile on iOS, so it is not part of CallerState.
stmia r1!, {r4,r5,r6,r7,r8,r10-r11}
#else
stmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11}
#endif
@ Unwind the stack and restore registers.
add sp, sp, #44 @ 44 = 28+16
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10,r11,lr} @ Skip r0-r3 (see +16 above).
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
bx lr
.globl _abi_test_clobber_r0
.private_extern _abi_test_clobber_r0
.align 4
_abi_test_clobber_r0:
mov r0, #0
bx lr
.globl _abi_test_clobber_r1
.private_extern _abi_test_clobber_r1
.align 4
_abi_test_clobber_r1:
mov r1, #0
bx lr
.globl _abi_test_clobber_r2
.private_extern _abi_test_clobber_r2
.align 4
_abi_test_clobber_r2:
mov r2, #0
bx lr
.globl _abi_test_clobber_r3
.private_extern _abi_test_clobber_r3
.align 4
_abi_test_clobber_r3:
mov r3, #0
bx lr
.globl _abi_test_clobber_r4
.private_extern _abi_test_clobber_r4
.align 4
_abi_test_clobber_r4:
mov r4, #0
bx lr
.globl _abi_test_clobber_r5
.private_extern _abi_test_clobber_r5
.align 4
_abi_test_clobber_r5:
mov r5, #0
bx lr
.globl _abi_test_clobber_r6
.private_extern _abi_test_clobber_r6
.align 4
_abi_test_clobber_r6:
mov r6, #0
bx lr
.globl _abi_test_clobber_r7
.private_extern _abi_test_clobber_r7
.align 4
_abi_test_clobber_r7:
mov r7, #0
bx lr
.globl _abi_test_clobber_r8
.private_extern _abi_test_clobber_r8
.align 4
_abi_test_clobber_r8:
mov r8, #0
bx lr
.globl _abi_test_clobber_r9
.private_extern _abi_test_clobber_r9
.align 4
_abi_test_clobber_r9:
mov r9, #0
bx lr
.globl _abi_test_clobber_r10
.private_extern _abi_test_clobber_r10
.align 4
_abi_test_clobber_r10:
mov r10, #0
bx lr
.globl _abi_test_clobber_r11
.private_extern _abi_test_clobber_r11
.align 4
_abi_test_clobber_r11:
mov r11, #0
bx lr
.globl _abi_test_clobber_r12
.private_extern _abi_test_clobber_r12
.align 4
_abi_test_clobber_r12:
mov r12, #0
bx lr
.globl _abi_test_clobber_d0
.private_extern _abi_test_clobber_d0
.align 4
_abi_test_clobber_d0:
mov r0, #0
vmov s0, r0
vmov s1, r0
bx lr
.globl _abi_test_clobber_d1
.private_extern _abi_test_clobber_d1
.align 4
_abi_test_clobber_d1:
mov r0, #0
vmov s2, r0
vmov s3, r0
bx lr
.globl _abi_test_clobber_d2
.private_extern _abi_test_clobber_d2
.align 4
_abi_test_clobber_d2:
mov r0, #0
vmov s4, r0
vmov s5, r0
bx lr
.globl _abi_test_clobber_d3
.private_extern _abi_test_clobber_d3
.align 4
_abi_test_clobber_d3:
mov r0, #0
vmov s6, r0
vmov s7, r0
bx lr
.globl _abi_test_clobber_d4
.private_extern _abi_test_clobber_d4
.align 4
_abi_test_clobber_d4:
mov r0, #0
vmov s8, r0
vmov s9, r0
bx lr
.globl _abi_test_clobber_d5
.private_extern _abi_test_clobber_d5
.align 4
_abi_test_clobber_d5:
mov r0, #0
vmov s10, r0
vmov s11, r0
bx lr
.globl _abi_test_clobber_d6
.private_extern _abi_test_clobber_d6
.align 4
_abi_test_clobber_d6:
mov r0, #0
vmov s12, r0
vmov s13, r0
bx lr
.globl _abi_test_clobber_d7
.private_extern _abi_test_clobber_d7
.align 4
_abi_test_clobber_d7:
mov r0, #0
vmov s14, r0
vmov s15, r0
bx lr
.globl _abi_test_clobber_d8
.private_extern _abi_test_clobber_d8
.align 4
_abi_test_clobber_d8:
mov r0, #0
vmov s16, r0
vmov s17, r0
bx lr
.globl _abi_test_clobber_d9
.private_extern _abi_test_clobber_d9
.align 4
_abi_test_clobber_d9:
mov r0, #0
vmov s18, r0
vmov s19, r0
bx lr
.globl _abi_test_clobber_d10
.private_extern _abi_test_clobber_d10
.align 4
_abi_test_clobber_d10:
mov r0, #0
vmov s20, r0
vmov s21, r0
bx lr
.globl _abi_test_clobber_d11
.private_extern _abi_test_clobber_d11
.align 4
_abi_test_clobber_d11:
mov r0, #0
vmov s22, r0
vmov s23, r0
bx lr
.globl _abi_test_clobber_d12
.private_extern _abi_test_clobber_d12
.align 4
_abi_test_clobber_d12:
mov r0, #0
vmov s24, r0
vmov s25, r0
bx lr
.globl _abi_test_clobber_d13
.private_extern _abi_test_clobber_d13
.align 4
_abi_test_clobber_d13:
mov r0, #0
vmov s26, r0
vmov s27, r0
bx lr
.globl _abi_test_clobber_d14
.private_extern _abi_test_clobber_d14
.align 4
_abi_test_clobber_d14:
mov r0, #0
vmov s28, r0
vmov s29, r0
bx lr
.globl _abi_test_clobber_d15
.private_extern _abi_test_clobber_d15
.align 4
_abi_test_clobber_d15:
mov r0, #0
vmov s30, r0
vmov s31, r0
bx lr
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
// ---- wlsfx/bnbb (19,155 bytes) ----
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/aesv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.code 32
#undef __thumb2__
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl _aes_hw_set_encrypt_key
.private_extern _aes_hw_set_encrypt_key
#ifdef __thumb2__
.thumb_func _aes_hw_set_encrypt_key
#endif
.align 5
_aes_hw_set_encrypt_key:
Lenc_key:
mov r3,#-1
cmp r0,#0
beq Lenc_key_abort
cmp r2,#0
beq Lenc_key_abort
mov r3,#-2
cmp r1,#128
blt Lenc_key_abort
cmp r1,#256
bgt Lenc_key_abort
tst r1,#0x3f
bne Lenc_key_abort
adr r3,Lrcon
cmp r1,#192
veor q0,q0,q0
vld1.8 {q3},[r0]!
mov r1,#8 @ reuse r1
vld1.32 {q1,q2},[r3]!
blt Loop128
beq L192
b L256
.align 4
Loop128:
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
bne Loop128
vld1.32 {q1},[r3]
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
veor q3,q3,q10
vst1.32 {q3},[r2]
add r2,r2,#0x50
mov r12,#10
b Ldone
.align 4
L192:
vld1.8 {d16},[r0]!
vmov.i8 q10,#8 @ borrow q10
vst1.32 {q3},[r2]!
vsub.i8 q2,q2,q10 @ adjust the mask
Loop192:
vtbl.8 d20,{q8},d4
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {d16},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vdup.32 q9,d7[1]
veor q9,q9,q8
veor q10,q10,q1
vext.8 q8,q0,q8,#12
vshl.u8 q1,q1,#1
veor q8,q8,q9
veor q3,q3,q10
veor q8,q8,q10
vst1.32 {q3},[r2]!
bne Loop192
mov r12,#12
add r2,r2,#0x20
b Ldone
.align 4
L256:
vld1.8 {q8},[r0]
mov r1,#7
mov r12,#14
vst1.32 {q3},[r2]!
Loop256:
vtbl.8 d20,{q8},d4
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {q8},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
vst1.32 {q3},[r2]!
beq Ldone
vdup.32 q10,d7[1]
vext.8 q9,q0,q8,#12
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q8,q8,q9
vext.8 q9,q0,q9,#12
veor q8,q8,q9
vext.8 q9,q0,q9,#12
veor q8,q8,q9
veor q8,q8,q10
b Loop256
Ldone:
str r12,[r2]
mov r3,#0
Lenc_key_abort:
mov r0,r3 @ return value
bx lr
.globl _aes_hw_set_decrypt_key
.private_extern _aes_hw_set_decrypt_key
#ifdef __thumb2__
.thumb_func _aes_hw_set_decrypt_key
#endif
.align 5
_aes_hw_set_decrypt_key:
stmdb sp!,{r4,lr}
bl Lenc_key
cmp r0,#0
bne Ldec_key_abort
sub r2,r2,#240 @ restore original r2
mov r4,#-16
add r0,r2,r12,lsl#4 @ end of key schedule
vld1.32 {q0},[r2]
vld1.32 {q1},[r0]
vst1.32 {q0},[r0],r4
vst1.32 {q1},[r2]!
Loop_imc:
vld1.32 {q0},[r2]
vld1.32 {q1},[r0]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
vst1.32 {q0},[r0],r4
vst1.32 {q1},[r2]!
cmp r0,r2
bhi Loop_imc
vld1.32 {q0},[r2]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
vst1.32 {q0},[r0]
eor r0,r0,r0 @ return value
Ldec_key_abort:
ldmia sp!,{r4,pc}
.globl _aes_hw_encrypt
.private_extern _aes_hw_encrypt
#ifdef __thumb2__
.thumb_func _aes_hw_encrypt
#endif
.align 5
_aes_hw_encrypt:
AARCH64_VALID_CALL_TARGET
ldr r3,[r2,#240]
vld1.32 {q0},[r2]!
vld1.8 {q2},[r0]
sub r3,r3,#2
vld1.32 {q1},[r2]!
Loop_enc:
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q1},[r2]!
bgt Loop_enc
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
bx lr
.globl _aes_hw_decrypt
.private_extern _aes_hw_decrypt
#ifdef __thumb2__
.thumb_func _aes_hw_decrypt
#endif
.align 5
_aes_hw_decrypt:
AARCH64_VALID_CALL_TARGET
ldr r3,[r2,#240]
vld1.32 {q0},[r2]!
vld1.8 {q2},[r0]
sub r3,r3,#2
vld1.32 {q1},[r2]!
Loop_dec:
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q1},[r2]!
bgt Loop_dec
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
bx lr
.globl _aes_hw_cbc_encrypt
.private_extern _aes_hw_cbc_encrypt
#ifdef __thumb2__
.thumb_func _aes_hw_cbc_encrypt
#endif
.align 5
_aes_hw_cbc_encrypt:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load remaining args
subs r2,r2,#16
mov r8,#16
blo Lcbc_abort
moveq r8,#0
cmp r5,#0 @ en- or decrypting?
ldr r5,[r3,#240]
and r2,r2,#-16
vld1.8 {q6},[r4]
vld1.8 {q0},[r0],r8
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#6
add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
sub r5,r5,#2
vld1.32 {q10,q11},[r7]!
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7]
add r7,r3,#32
mov r6,r5
beq Lcbc_dec
cmp r5,#2
veor q0,q0,q6
veor q5,q8,q7
beq Lcbc_enc128
vld1.32 {q2,q3},[r7]
add r7,r3,#16
add r6,r3,#16*4
add r12,r3,#16*5
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
add r14,r3,#16*6
add r3,r3,#16*7
b Lenter_cbc_enc
.align 4
Loop_cbc_enc:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]!
Lenter_cbc_enc:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r6]
cmp r5,#4
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r12]
beq Lcbc_enc192
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r14]
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r3]
nop
Lcbc_enc192:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r7] @ re-pre-load rndkey[1]
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7
bhs Loop_cbc_enc
vst1.8 {q6},[r1]!
b Lcbc_done
.align 5
Lcbc_enc128:
vld1.32 {q2,q3},[r7]
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
b Lenter_cbc_enc128
Loop_cbc_enc128:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]!
Lenter_cbc_enc128:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7
bhs Loop_cbc_enc128
vst1.8 {q6},[r1]!
b Lcbc_done
.align 5
Lcbc_dec:
vld1.8 {q10},[r0]!
subs r2,r2,#32 @ bias
add r6,r5,#2
vorr q3,q0,q0
vorr q1,q0,q0
vorr q11,q10,q10
blo Lcbc_dec_tail
vorr q1,q10,q10
vld1.8 {q10},[r0]!
vorr q2,q0,q0
vorr q3,q1,q1
vorr q11,q10,q10
Loop3x_cbc_dec:
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt Loop3x_cbc_dec
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q4,q6,q7
subs r2,r2,#0x30
veor q5,q2,q7
movlo r6,r2 @ r6 is zero at this point
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
add r0,r0,r6 @ r0 is adjusted so that at exit
@ from the loop q1-q10 are loaded
@ with the last "words"
vorr q6,q11,q11
mov r7,r3
.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q2},[r0]!
.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q3},[r0]!
.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q11},[r0]!
.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
add r6,r5,#2
veor q4,q4,q0
veor q5,q5,q1
veor q10,q10,q9
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
vst1.8 {q4},[r1]!
vorr q0,q2,q2
vst1.8 {q5},[r1]!
vorr q1,q3,q3
vst1.8 {q10},[r1]!
vorr q10,q11,q11
bhs Loop3x_cbc_dec
cmn r2,#0x30
beq Lcbc_done
nop
Lcbc_dec_tail:
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt Lcbc_dec_tail
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
cmn r2,#0x20
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q5,q6,q7
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
beq Lcbc_dec_one
veor q5,q5,q1
veor q9,q9,q10
vorr q6,q11,q11
vst1.8 {q5},[r1]!
vst1.8 {q9},[r1]!
b Lcbc_done
Lcbc_dec_one:
veor q5,q5,q10
vorr q6,q11,q11
vst1.8 {q5},[r1]!
Lcbc_done:
vst1.8 {q6},[r4]
Lcbc_abort:
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,pc}
.globl _aes_hw_ctr32_encrypt_blocks
.private_extern _aes_hw_ctr32_encrypt_blocks
#ifdef __thumb2__
.thumb_func _aes_hw_ctr32_encrypt_blocks
#endif
.align 5
_aes_hw_ctr32_encrypt_blocks:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldr r4, [ip] @ load remaining arg
ldr r5,[r3,#240]
ldr r8, [r4, #12]
vld1.32 {q0},[r4]
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#4
mov r12,#16
cmp r2,#2
add r7,r3,r5,lsl#4 @ pointer to last 5 round keys
sub r5,r5,#2
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7]
add r7,r3,#32
mov r6,r5
@ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
@ affected by silicon errata #1742098 [0] and #1655431 [1],
@ respectively, where the second instruction of an aese/aesmc
@ instruction pair may execute twice if an interrupt is taken right
@ after the first instruction consumes an input register of which a
@ single 32-bit lane has been updated the last time it was modified.
@
@ This function uses the counter in one 32-bit lane. The vmov.32
@ instructions could write to q1 and q10 directly, but that trips this bug.
@ We write to q6 and copy to the final register as a workaround.
@
@ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
@ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
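@
@ Illustrative shape of the workaround as used below (a restatement of the
@ generated code, not an addition to it):
@
@   vmov.32 d13[1], r10    @ touch only the counter lane of q6
@   vorr    q1, q6, q6     @ then copy the whole register into the lane
@                          @ consumed by the next aese/aesmc pair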
#ifndef __ARMEB__
rev r8, r8
#endif
add r10, r8, #1
vorr q6,q0,q0
rev r10, r10
vmov.32 d13[1],r10
add r8, r8, #2
vorr q1,q6,q6
bls Lctr32_tail
rev r12, r8
vmov.32 d13[1],r12
sub r2,r2,#3 @ bias
vorr q10,q6,q6
b Loop3x_ctr32
.align 4
Loop3x_ctr32:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q9},[r7]!
bgt Loop3x_ctr32
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
vld1.8 {q2},[r0]!
add r9,r8,#1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.8 {q3},[r0]!
rev r9,r9
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vld1.8 {q11},[r0]!
mov r7,r3
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
veor q2,q2,q7
add r10,r8,#2
.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
veor q3,q3,q7
add r8,r8,#3
.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
@ Note the logic to update q0, q1, and q10 is written to work
@ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
@ 32-bit mode. See the comment above.
veor q11,q11,q7
vmov.32 d13[1], r9
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vorr q0,q6,q6
rev r10,r10
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
vmov.32 d13[1], r10
rev r12,r8
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vorr q1,q6,q6
vmov.32 d13[1], r12
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vorr q10,q6,q6
subs r2,r2,#3
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
veor q2,q2,q4
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
vst1.8 {q2},[r1]!
veor q3,q3,q5
mov r6,r5
vst1.8 {q3},[r1]!
veor q11,q11,q9
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
vst1.8 {q11},[r1]!
bhs Loop3x_ctr32
adds r2,r2,#3
beq Lctr32_done
Lctr32_tail:
cmp r2,#1
blt Lctr32_done @ if len = 0, go to done
mov r12,#16
moveq r12,#0
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q9},[r7]!
bgt Lctr32_tail
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q2},[r0],r12
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q3},[r0]
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q2,q2,q7
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q3,q3,q7
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
veor q2,q2,q0
veor q3,q3,q1
vst1.8 {q2},[r1]!
cmp r12, #0
beq Lctr32_done
vst1.8 {q3},[r1]
Lctr32_done:
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
// ---- wlsfx/bnbb (32,164 bytes) ----
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/bsaes-armv7.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
@ of Linaro. Permission to use under GPL terms is granted.
@ ====================================================================
@ Bit-sliced AES for ARM NEON
@
@ February 2012.
@
@ This implementation is direct adaptation of bsaes-x86_64 module for
@ ARM NEON. Except that this module is endian-neutral [in sense that
@ it can be compiled for either endianness] by courtesy of vld1.8's
@ neutrality. Initial version doesn't implement interface to OpenSSL,
@ only low-level primitives and unsupported entry points, just enough
@ to collect performance results, which for Cortex-A8 core are:
@
@ encrypt 19.5 cycles per byte processed with 128-bit key
@ decrypt 22.1 cycles per byte processed with 128-bit key
@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
@
@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
@ which is [much] worse than anticipated (for further details see
@ http://www.openssl.org/~appro/Snapdragon-S4.html).
@
@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
@ manages in 20.0 cycles].
@
@ When comparing to x86_64 results keep in mind that NEON unit is
@ [mostly] single-issue and thus can't [fully] benefit from
@ instruction-level parallelism. And when comparing to aes-armv4
@ results keep in mind key schedule conversion overhead (see
@ bsaes-x86_64.pl for further details)...
@
@ <appro@openssl.org>
@ April-August 2013
@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
#ifndef __KERNEL__
# include <openssl/arm_arch.h>
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
# define VFP_ABI_FRAME 0x40
#else
# define VFP_ABI_PUSH
# define VFP_ABI_POP
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
#if __ARM_MAX_ARCH__>=7
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#if defined(__thumb2__) && !defined(__APPLE__)
.thumb
#else
.code 32
# undef __thumb2__
#endif
#ifdef __thumb2__
.thumb_func _bsaes_decrypt8
#endif
.align 4
_bsaes_decrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,LM0ISR
#else
add r6,r6,#LM0ISR-_bsaes_decrypt8
#endif
vldmia r6!, {q8} @ LM0ISR
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
vmov.i8 q8,#0x55 @ compose LBS0
vmov.i8 q9,#0x33 @ compose LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
sub r5,r5,#1
b Ldec_sbox
.align 4
Ldec_loop:
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
Ldec_sbox:
veor q1, q1, q4
veor q3, q3, q4
veor q4, q4, q7
veor q1, q1, q6
veor q2, q2, q7
veor q6, q6, q4
veor q0, q0, q1
veor q2, q2, q5
veor q7, q7, q6
veor q3, q3, q0
veor q5, q5, q0
veor q1, q1, q3
veor q11, q3, q0
veor q10, q7, q4
veor q9, q1, q6
veor q13, q4, q0
vmov q8, q10
veor q12, q5, q2
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q6, q2
vand q15, q15, q12
vand q13, q13, q9
veor q9, q3, q7
veor q12, q1, q5
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q4, q6
veor q9, q9, q14
vand q13, q0, q2
vand q14, q7, q1
vorr q15, q3, q5
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q5, q2
veor q8, q1, q6
veor q10, q15, q14
vand q10, q10, q5
veor q5, q5, q1
vand q11, q1, q15
vand q5, q5, q14
veor q1, q11, q10
veor q5, q5, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q2
veor q12, q12, q8
veor q2, q2, q6
vand q8, q8, q15
vand q6, q6, q13
vand q12, q12, q14
vand q2, q2, q9
veor q8, q8, q12
veor q2, q2, q6
veor q12, q12, q11
veor q6, q6, q10
veor q5, q5, q12
veor q2, q2, q12
veor q1, q1, q8
veor q6, q6, q8
veor q12, q3, q0
veor q8, q7, q4
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q4
vand q8, q8, q15
vand q4, q4, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q4
veor q12, q12, q11
veor q4, q4, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q3
veor q3, q3, q7
vand q11, q7, q15
vand q3, q3, q14
veor q7, q11, q10
veor q3, q3, q11
veor q3, q3, q12
veor q0, q0, q12
veor q7, q7, q8
veor q4, q4, q8
veor q1, q1, q7
veor q6, q6, q5
veor q4, q4, q1
veor q2, q2, q7
veor q5, q5, q7
veor q4, q4, q2
veor q7, q7, q0
veor q4, q4, q5
veor q3, q3, q6
veor q6, q6, q1
veor q3, q3, q4
veor q4, q4, q0
veor q7, q7, q3
subs r5,r5,#1
bcc Ldec_done
@ multiplication by 0x05-0x00-0x04-0x00
vext.8 q8, q0, q0, #8
vext.8 q14, q3, q3, #8
vext.8 q15, q5, q5, #8
veor q8, q8, q0
vext.8 q9, q1, q1, #8
veor q14, q14, q3
vext.8 q10, q6, q6, #8
veor q15, q15, q5
vext.8 q11, q4, q4, #8
veor q9, q9, q1
vext.8 q12, q2, q2, #8
veor q10, q10, q6
vext.8 q13, q7, q7, #8
veor q11, q11, q4
veor q12, q12, q2
veor q13, q13, q7
veor q0, q0, q14
veor q1, q1, q14
veor q6, q6, q8
veor q2, q2, q10
veor q4, q4, q9
veor q1, q1, q15
veor q6, q6, q15
veor q2, q2, q14
veor q7, q7, q11
veor q4, q4, q14
veor q3, q3, q12
veor q2, q2, q15
veor q7, q7, q15
veor q5, q5, q13
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q6, q6, #12
veor q1, q1, q9
vext.8 q11, q4, q4, #12
veor q6, q6, q10
vext.8 q12, q2, q2, #12
veor q4, q4, q11
vext.8 q13, q7, q7, #12
veor q2, q2, q12
vext.8 q14, q3, q3, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q3, q3, q14
veor q9, q9, q0
veor q5, q5, q15
vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q2
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q2, q2, #8
veor q12, q12, q4
vext.8 q9, q7, q7, #8
veor q15, q15, q3
vext.8 q2, q4, q4, #8
veor q11, q11, q6
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q4, q3, q3, #8
veor q11, q11, q5
vext.8 q3, q6, q6, #8
veor q5, q9, q13
veor q11, q11, q2
veor q7, q7, q15
veor q6, q4, q14
veor q4, q8, q12
veor q2, q3, q10
vmov q3, q11
@ vmov q5, q9
vldmia r6, {q12} @ LISR
ite eq @ Thumb2 thing, sanity check in ARM
addeq r6,r6,#0x10
bne Ldec_loop
vldmia r6, {q12} @ LISRM0
b Ldec_loop
.align 4
Ldec_done:
vmov.i8 q8,#0x55 @ compose LBS0
vmov.i8 q9,#0x33 @ compose LBS1
vshr.u64 q10, q3, #1
vshr.u64 q11, q2, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q6, #1
vshr.u64 q11, q0, #1
veor q10, q10, q4
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q4, q4, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q2, #2
veor q10, q10, q5
veor q11, q11, q3
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q3, q3, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q2, q2, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q4
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q4, q4, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q4, #4
vshr.u64 q11, q6, #4
veor q10, q10, q5
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q4, q4, q10
veor q6, q6, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q6, q6, q8
veor q4, q4, q8
veor q2, q2, q8
veor q7, q7, q8
veor q3, q3, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
.align 6
_bsaes_const:
LM0ISR:@ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
LISR:
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
LISRM0:
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
LM0SR:@ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
LSR:
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
LSRM0:
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
LM0:
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
LREVM0SR:
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 6
#ifdef __thumb2__
.thumb_func _bsaes_encrypt8
#endif
.align 4
_bsaes_encrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,LM0SR
#else
sub r6,r6,#_bsaes_encrypt8-LM0SR
#endif
vldmia r6!, {q8} @ LM0SR
_bsaes_encrypt8_alt:
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
_bsaes_encrypt8_bitslice:
vmov.i8 q8,#0x55 @ compose LBS0
vmov.i8 q9,#0x33 @ compose LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
sub r5,r5,#1
b Lenc_sbox
.align 4
Lenc_loop:
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
Lenc_sbox:
veor q2, q2, q1
veor q5, q5, q6
veor q3, q3, q0
veor q6, q6, q2
veor q5, q5, q0
veor q6, q6, q3
veor q3, q3, q7
veor q7, q7, q5
veor q3, q3, q4
veor q4, q4, q5
veor q2, q2, q7
veor q3, q3, q1
veor q1, q1, q5
veor q11, q7, q4
veor q10, q1, q2
veor q9, q5, q3
veor q13, q2, q4
vmov q8, q10
veor q12, q6, q0
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q3, q0
vand q15, q15, q12
vand q13, q13, q9
veor q9, q7, q1
veor q12, q5, q6
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q2, q3
veor q9, q9, q14
vand q13, q4, q0
vand q14, q1, q5
vorr q15, q7, q6
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q6, q0
veor q8, q5, q3
veor q10, q15, q14
vand q10, q10, q6
veor q6, q6, q5
vand q11, q5, q15
vand q6, q6, q14
veor q5, q11, q10
veor q6, q6, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q3
vand q8, q8, q15
vand q3, q3, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q3
veor q12, q12, q11
veor q3, q3, q10
veor q6, q6, q12
veor q0, q0, q12
veor q5, q5, q8
veor q3, q3, q8
veor q12, q7, q4
veor q8, q1, q2
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q4
veor q12, q12, q8
veor q4, q4, q2
vand q8, q8, q15
vand q2, q2, q13
vand q12, q12, q14
vand q4, q4, q9
veor q8, q8, q12
veor q4, q4, q2
veor q12, q12, q11
veor q2, q2, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q7
veor q7, q7, q1
vand q11, q1, q15
vand q7, q7, q14
veor q1, q11, q10
veor q7, q7, q11
veor q7, q7, q12
veor q4, q4, q12
veor q1, q1, q8
veor q2, q2, q8
veor q7, q7, q0
veor q1, q1, q6
veor q6, q6, q0
veor q4, q4, q7
veor q0, q0, q1
veor q1, q1, q5
veor q5, q5, q2
veor q2, q2, q3
veor q3, q3, q5
veor q4, q4, q5
veor q6, q6, q3
subs r5,r5,#1
bcc Lenc_done
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q4, q4, #12
veor q1, q1, q9
vext.8 q11, q6, q6, #12
veor q4, q4, q10
vext.8 q12, q3, q3, #12
veor q6, q6, q11
vext.8 q13, q7, q7, #12
veor q3, q3, q12
vext.8 q14, q2, q2, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q2, q2, q14
veor q9, q9, q0
veor q5, q5, q15
vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q3
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q3, q3, #8
veor q12, q12, q6
vext.8 q9, q7, q7, #8
veor q15, q15, q2
vext.8 q3, q6, q6, #8
veor q11, q11, q4
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q6, q2, q2, #8
veor q11, q11, q5
vext.8 q2, q4, q4, #8
veor q5, q9, q13
veor q4, q8, q12
veor q3, q3, q11
veor q7, q7, q15
veor q6, q6, q14
@ vmov q4, q8
veor q2, q2, q10
@ vmov q5, q9
vldmia r6, {q12} @ LSR
ite eq @ Thumb2 thing, sanity check in ARM
addeq r6,r6,#0x10
bne Lenc_loop
vldmia r6, {q12} @ LSRM0
b Lenc_loop
.align 4
Lenc_done:
vmov.i8 q8,#0x55 @ compose LBS0
vmov.i8 q9,#0x33 @ compose LBS1
vshr.u64 q10, q2, #1
vshr.u64 q11, q3, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q3, q3, q11
vshr.u64 q10, q4, #1
vshr.u64 q11, q0, #1
veor q10, q10, q6
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q6, q6, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q4, q4, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q3, #2
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q3, q3, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q6
veor q11, q11, q4
vand q10, q10, q9
vand q11, q11, q9
veor q6, q6, q10
vshl.u64 q10, q10, #2
veor q4, q4, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q6, #4
vshr.u64 q11, q4, #4
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q4, q4, q8
veor q6, q6, q8
veor q3, q3, q8
veor q7, q7, q8
veor q2, q2, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
#ifdef __thumb2__
.thumb_func _bsaes_key_convert
#endif
.align 4
_bsaes_key_convert:
adr r6,.
vld1.8 {q7}, [r4]! @ load round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,LM0
#else
sub r6,r6,#_bsaes_key_convert-LM0
#endif
vld1.8 {q15}, [r4]! @ load round 1 key
vmov.i8 q8, #0x01 @ bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
vldmia r6, {q14} @ LM0
#ifdef __ARMEL__
vrev32.8 q7, q7
vrev32.8 q15, q15
#endif
sub r5,r5,#1
vstmia r12!, {q7} @ save round 0 key
b Lkey_loop
.align 4
Lkey_loop:
vtbl.8 d14,{q15},d28
vtbl.8 d15,{q15},d29
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.8 {q15}, [r4]! @ load next round key
vmvn q0, q0 @ "pnot"
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
#ifdef __ARMEL__
vrev32.8 q15, q15
#endif
subs r5,r5,#1
vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key
bne Lkey_loop
vmov.i8 q7,#0x63 @ compose L63
@ don't save last round key
bx lr
.globl _bsaes_cbc_encrypt
.private_extern _bsaes_cbc_encrypt
#ifdef __thumb2__
.thumb_func _bsaes_cbc_encrypt
#endif
.align 5
_bsaes_cbc_encrypt:
@ In OpenSSL, this function had a fallback to aes_nohw_cbc_encrypt for
@ short inputs. We patch this out, using bsaes for all input sizes.
@ it is up to the caller to make sure we are called with enc == 0
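@ For reference, the C-level prototype this entry point is expected to
@ match (an assumption based on the library's internal declarations; this
@ file itself does not state it) is roughly:
@   void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
@                          const AES_KEY *key, uint8_t ivec[16], int enc);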
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ IV is 1st arg on the stack
mov r2, r2, lsr#4 @ len in 16 byte blocks
sub sp, #0x10 @ scratch space to carry over the IV
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
add r12, #96 @ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp is sp
bl _bsaes_key_convert
vldmia sp, {q6}
vstmia r12, {q15} @ save last round key
veor q7, q7, q6 @ fix up round 0 key
vstmia sp, {q7}
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
add r4, r3, #248
vldmia r4, {q6}
vstmia r12, {q15} @ save last round key
veor q7, q7, q6 @ fix up round 0 key
vstmia r4, {q7}
.align 2
#endif
vld1.8 {q15}, [r8] @ load IV
b Lcbc_dec_loop
.align 4
Lcbc_dec_loop:
subs r2, r2, #0x8
bmi Lcbc_dec_loop_finish
vld1.8 {q0,q1}, [r0]! @ load input
vld1.8 {q2,q3}, [r0]!
#ifndef BSAES_ASM_EXTENDED_KEY
mov r4, sp @ pass the key
#else
add r4, r3, #248
#endif
vld1.8 {q4,q5}, [r0]!
mov r5, r10
vld1.8 {q6,q7}, [r0]
sub r0, r0, #0x60
vstmia r9, {q15} @ put aside IV
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q14,q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q3, q3, q13
vst1.8 {q6}, [r1]!
veor q5, q5, q14
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
vst1.8 {q7}, [r1]!
vst1.8 {q3}, [r1]!
vst1.8 {q5}, [r1]!
b Lcbc_dec_loop
Lcbc_dec_loop_finish:
adds r2, r2, #8
beq Lcbc_dec_done
@ Set up most parameters for the _bsaes_decrypt8 call.
#ifndef BSAES_ASM_EXTENDED_KEY
mov r4, sp @ pass the key
#else
add r4, r3, #248
#endif
mov r5, r10
vstmia r9, {q15} @ put aside IV
vld1.8 {q0}, [r0]! @ load input
cmp r2, #2
blo Lcbc_dec_one
vld1.8 {q1}, [r0]!
beq Lcbc_dec_two
vld1.8 {q2}, [r0]!
cmp r2, #4
blo Lcbc_dec_three
vld1.8 {q3}, [r0]!
beq Lcbc_dec_four
vld1.8 {q4}, [r0]!
cmp r2, #6
blo Lcbc_dec_five
vld1.8 {q5}, [r0]!
beq Lcbc_dec_six
vld1.8 {q6}, [r0]!
sub r0, r0, #0x70
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q3, q3, q13
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
vst1.8 {q7}, [r1]!
vst1.8 {q3}, [r1]!
b Lcbc_dec_done
.align 4
Lcbc_dec_six:
sub r0, r0, #0x60
bl _bsaes_decrypt8
vldmia r9,{q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
vst1.8 {q7}, [r1]!
b Lcbc_dec_done
.align 4
Lcbc_dec_five:
sub r0, r0, #0x50
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q15}, [r0]!
veor q4, q4, q10
vst1.8 {q0,q1}, [r1]! @ write output
veor q2, q2, q11
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
b Lcbc_dec_done
.align 4
Lcbc_dec_four:
sub r0, r0, #0x40
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q15}, [r0]!
veor q4, q4, q10
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
b Lcbc_dec_done
.align 4
Lcbc_dec_three:
sub r0, r0, #0x30
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q15}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
b Lcbc_dec_done
.align 4
Lcbc_dec_two:
sub r0, r0, #0x20
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q15}, [r0]! @ reload input
veor q1, q1, q8
vst1.8 {q0,q1}, [r1]! @ write output
b Lcbc_dec_done
.align 4
Lcbc_dec_one:
sub r0, r0, #0x10
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q15}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vst1.8 {q0}, [r1]! @ write output
Lcbc_dec_done:
#ifndef BSAES_ASM_EXTENDED_KEY
vmov.i32 q0, #0
vmov.i32 q1, #0
Lcbc_dec_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne Lcbc_dec_bzero
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
vst1.8 {q15}, [r8] @ return IV
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc}
.globl _bsaes_ctr32_encrypt_blocks
.private_extern _bsaes_ctr32_encrypt_blocks
#ifdef __thumb2__
.thumb_func _bsaes_ctr32_encrypt_blocks
#endif
.align 5
_bsaes_ctr32_encrypt_blocks:
@ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this
@ out to retain a constant-time implementation.
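@ The matching C-level prototype (again an assumption drawn from the
@ library's internal declarations, not stated in this file) is roughly:
@   void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
@                                   size_t len, const AES_KEY *key,
@                                   const uint8_t ivec[16]);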
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ ctr is 1st arg on the stack
sub sp, sp, #0x10 @ scratch space to carry over the ctr
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
add r12, #96 @ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp is sp
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
vld1.8 {q0}, [r8] @ load counter
#ifdef __APPLE__
mov r8, #:lower16:(LREVM0SR-LM0)
add r8, r6, r8
#else
add r8, r6, #LREVM0SR-LM0 @ borrow r8
#endif
vldmia sp, {q4} @ load round0 key
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
.align 2
add r12, r3, #248
vld1.8 {q0}, [r8] @ load counter
adrl r8, LREVM0SR @ borrow r8
vldmia r12, {q4} @ load round0 key
sub sp, #0x10 @ place for adjusted round0 key
#endif
vmov.i32 q8,#1 @ compose 1<<96
veor q9,q9,q9
vrev32.8 q0,q0
vext.8 q8,q9,q8,#4
vrev32.8 q4,q4
vadd.u32 q9,q8,q8 @ compose 2<<96
vstmia sp, {q4} @ save adjusted round0 key
b Lctr_enc_loop
.align 4
Lctr_enc_loop:
vadd.u32 q10, q8, q9 @ compose 3<<96
vadd.u32 q1, q0, q8 @ +1
vadd.u32 q2, q0, q9 @ +2
vadd.u32 q3, q0, q10 @ +3
vadd.u32 q4, q1, q10
vadd.u32 q5, q2, q10
vadd.u32 q6, q3, q10
vadd.u32 q7, q4, q10
vadd.u32 q10, q5, q10 @ next counter
@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
@ to flip byte order in 32-bit counter
vldmia sp, {q9} @ load round0 key
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x10 @ pass next round key
#else
add r4, r3, #264
#endif
vldmia r8, {q8} @ LREVM0SR
mov r5, r10 @ pass rounds
vstmia r9, {q10} @ save next counter
#ifdef __APPLE__
mov r6, #:lower16:(LREVM0SR-LSR)
sub r6, r8, r6
#else
sub r6, r8, #LREVM0SR-LSR @ pass constants
#endif
bl _bsaes_encrypt8_alt
subs r2, r2, #8
blo Lctr_enc_loop_done
vld1.8 {q8,q9}, [r0]! @ load input
vld1.8 {q10,q11}, [r0]!
veor q0, q8
veor q1, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q10
veor q6, q11
vld1.8 {q14,q15}, [r0]!
veor q3, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q7, q13
veor q2, q14
vst1.8 {q4}, [r1]!
veor q5, q15
vst1.8 {q6}, [r1]!
vmov.i32 q8, #1 @ compose 1<<96
vst1.8 {q3}, [r1]!
veor q9, q9, q9
vst1.8 {q7}, [r1]!
vext.8 q8, q9, q8, #4
vst1.8 {q2}, [r1]!
vadd.u32 q9,q8,q8 @ compose 2<<96
vst1.8 {q5}, [r1]!
vldmia r9, {q0} @ load counter
bne Lctr_enc_loop
b Lctr_enc_done
.align 4
Lctr_enc_loop_done:
add r2, r2, #8
vld1.8 {q8}, [r0]! @ load input
veor q0, q8
vst1.8 {q0}, [r1]! @ write output
cmp r2, #2
blo Lctr_enc_done
vld1.8 {q9}, [r0]!
veor q1, q9
vst1.8 {q1}, [r1]!
beq Lctr_enc_done
vld1.8 {q10}, [r0]!
veor q4, q10
vst1.8 {q4}, [r1]!
cmp r2, #4
blo Lctr_enc_done
vld1.8 {q11}, [r0]!
veor q6, q11
vst1.8 {q6}, [r1]!
beq Lctr_enc_done
vld1.8 {q12}, [r0]!
veor q3, q12
vst1.8 {q3}, [r1]!
cmp r2, #6
blo Lctr_enc_done
vld1.8 {q13}, [r0]!
veor q7, q13
vst1.8 {q7}, [r1]!
beq Lctr_enc_done
vld1.8 {q14}, [r0]
veor q2, q14
vst1.8 {q2}, [r1]!
Lctr_enc_done:
vmov.i32 q0, #0
vmov.i32 q1, #0
#ifndef BSAES_ASM_EXTENDED_KEY
Lctr_enc_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne Lctr_enc_bzero
#else
vstmia sp, {q0,q1}
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
@ OpenSSL contains aes_nohw_* fallback code here. We patch this
@ out to retain a constant-time implementation.
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
// ---- wlsfx/bnbb (40,095 bytes) ----
// .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/vpaes-armv7.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
.syntax unified
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
.text
.align 7 @ totally strategic alignment
_vpaes_consts:
Lk_mc_forward:@ mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward:@ mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr:@ sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
@
@ "Hot" constants
@
Lk_inv:@ inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt:@ input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo:@ sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1:@ sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2:@ sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.align 6
@@
@@ _aes_preheat
@@
@@ Fills q9-q15 as specified below.
@@
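@@ Concretely, reading off the loads below: q9 = 0x0f nibble mask,
@@ q10-q11 = Lk_inv, q12-q13 = Lk_sb1, q14-q15 = Lk_sb2.
@@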
#ifdef __thumb2__
.thumb_func _vpaes_preheat
#endif
.align 4
_vpaes_preheat:
adr r10, Lk_inv
vmov.i8 q9, #0x0f @ Lk_s0F
vld1.64 {q10,q11}, [r10]! @ Lk_inv
add r10, r10, #64 @ Skip Lk_ipt, Lk_sbo
vld1.64 {q12,q13}, [r10]! @ Lk_sb1
vld1.64 {q14,q15}, [r10] @ Lk_sb2
bx lr
@@
@@ _aes_encrypt_core
@@
@@ AES-encrypt q0.
@@
@@ Inputs:
@@ q0 = input
@@ q9-q15 as in _vpaes_preheat
@@ [r2] = scheduled keys
@@
@@ Output in q0
@@ Clobbers q1-q5, r8-r11
@@ Preserves q6-q8 so you get some local vectors
@@
@@
#ifdef __thumb2__
.thumb_func _vpaes_encrypt_core
#endif
.align 4
_vpaes_encrypt_core:
mov r9, r2
ldr r8, [r2,#240] @ pull rounds
adr r11, Lk_ipt
@ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
@ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
vld1.64 {q2, q3}, [r11]
adr r11, Lk_mc_forward+16
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1
vtbl.8 d3, {q2}, d3
vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2
vtbl.8 d5, {q3}, d1
veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
@ .Lenc_entry ends with a bne instruction which is normally paired with
@ subs in .Lenc_loop.
tst r8, r8
b Lenc_entry
.align 4
Lenc_loop:
@ middle of middle round
add r10, r11, #0x40
vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
vtbl.8 d9, {q13}, d5
vld1.64 {q1}, [r11]! @ vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
vtbl.8 d1, {q12}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
vtbl.8 d11, {q15}, d5
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
vtbl.8 d5, {q14}, d7
vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
vtbl.8 d7, {q0}, d3
veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
@ Write to q5 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
vtbl.8 d11, {q0}, d9
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
vtbl.8 d9, {q3}, d3
@ Here we restore the original q0/q5 usage.
veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... mod 4
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
subs r8, r8, #1 @ nr--
Lenc_entry:
@ top of round
vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
vtbl.8 d11, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
vtbl.8 d5, {q10}, d7
vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
vtbl.8 d7, {q10}, d9
veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5
bne Lenc_loop
@ middle of last round
add r10, r11, #0x80
adr r11, Lk_sbo
@ Read to q1 instead of q4, so the vtbl.8 instruction below does not
@ overlap table and destination registers.
vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou
vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot Lk_sbo+16
vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
vtbl.8 d9, {q1}, d5
vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
@ Write to q2 instead of q0 below, to avoid overlapping table and
@ destination registers.
vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
vtbl.8 d5, {q0}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
@ Here we restore the original q0/q2 usage.
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0
vtbl.8 d1, {q2}, d3
bx lr
.globl _vpaes_encrypt
.private_extern _vpaes_encrypt
#ifdef __thumb2__
.thumb_func _vpaes_encrypt
#endif
.align 4
_vpaes_encrypt:
@ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack
@ alignment.
stmdb sp!, {r7,r8,r9,r10,r11,lr}
@ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11}
vld1.64 {q0}, [r0]
bl _vpaes_preheat
bl _vpaes_encrypt_core
vst1.64 {q0}, [r1]
vldmia sp!, {d8,d9,d10,d11}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
@
@ Decryption stuff
@
.align 4
_vpaes_decrypt_consts:
Lk_dipt:@ decryption input transform
.quad 0x0F505B040B545F00, 0x154A411E114E451A
.quad 0x86E383E660056500, 0x12771772F491F194
Lk_dsbo:@ decryption sbox final output
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
Lk_dsb9:@ decryption sbox output *9*u, *9*t
.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
Lk_dsbd:@ decryption sbox output *D*u, *D*t
.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
Lk_dsbb:@ decryption sbox output *B*u, *B*t
.quad 0xD022649296B44200, 0x602646F6B0F2D404
.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
Lk_dsbe:@ decryption sbox output *E*u, *E*t
.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
@@
@@ Decryption core
@@
@@ Same API as encryption core, except it clobbers q12-q15 rather than using
@@ the values from _vpaes_preheat. q9-q11 must still be set from
@@ _vpaes_preheat.
@@
#ifdef __thumb2__
.thumb_func _vpaes_decrypt_core
#endif
.align 4
_vpaes_decrypt_core:
mov r9, r2
ldr r8, [r2,#240] @ pull rounds
@ This function performs shuffles with various constants. The x86_64
@ version loads them on-demand into %xmm0-%xmm5. This does not work well
@ for ARMv7 because those registers are shuffle destinations. The ARMv8
@ version preloads those constants into registers, but ARMv7 has half
@ the registers to work with. Instead, we load them on-demand into
@ q12-q15, registers normally used for preloaded constants. This is fine
@ because decryption doesn't use those constants. The values are
@ constant, so this does not interfere with potential 2x optimizations.
adr r7, Lk_dipt
vld1.64 {q12,q13}, [r7] @ vmovdqa Lk_dipt(%rip), %xmm2 # iptlo
lsl r11, r8, #4 @ mov %rax, %r11; shl $4, %r11
eor r11, r11, #0x30 @ xor $0x30, %r11
adr r10, Lk_sr
and r11, r11, #0x30 @ and $0x30, %r11
add r11, r11, r10
adr r10, Lk_mc_forward+48
vld1.64 {q4}, [r9]! @ vmovdqu (%r9), %xmm4 # round0 key
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d4, {q12}, d2 @ vpshufb %xmm1, %xmm2, %xmm2
vtbl.8 d5, {q12}, d3
vld1.64 {q5}, [r10] @ vmovdqa Lk_mc_forward+48(%rip), %xmm5
@ vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
vtbl.8 d0, {q13}, d0 @ vpshufb %xmm0, %xmm1, %xmm0
vtbl.8 d1, {q13}, d1
veor q2, q2, q4 @ vpxor %xmm4, %xmm2, %xmm2
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
@ .Ldec_entry ends with a bne instruction which is normally paired with
@ subs in .Ldec_loop.
tst r8, r8
b Ldec_entry
.align 4
Ldec_loop:
@
@ Inverse mix columns
@
@ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of
@ the function.
adr r10, Lk_dsb9
vld1.64 {q12,q13}, [r10]! @ vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
@ vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
@ Load sbd* ahead of time.
vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
@ vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
vtbl.8 d9, {q12}, d5
vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
vtbl.8 d3, {q13}, d7
veor q0, q4, q0 @ vpxor %xmm4, %xmm0, %xmm0
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
@ Load sbb* ahead of time.
vld1.64 {q12,q13}, [r10]! @ vmovdqa 0x20(%r10),%xmm4 # 4 : sbbu
@ vmovdqa 0x30(%r10),%xmm1 # 0 : sbbt
vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
vtbl.8 d9, {q14}, d5
@ Write to q1 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
vtbl.8 d3, {q0}, d11
@ Here we restore the original q0/q1 usage. This instruction is
@ reordered from the ARMv8 version so we do not clobber the vtbl.8
@ below.
veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
vtbl.8 d3, {q15}, d7
@ vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
@ vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
@ Load sbe* ahead of time.
vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x40(%r10),%xmm4 # 4 : sbeu
@ vmovdqa 0x50(%r10),%xmm1 # 0 : sbet
vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
vtbl.8 d9, {q12}, d5
@ Write to q1 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
vtbl.8 d3, {q0}, d11
@ Here we restore the original q0/q1 usage. This instruction is
@ reordered from the ARMv8 version so we do not clobber the vtbl.8
@ below.
veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
vtbl.8 d3, {q13}, d7
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
vtbl.8 d9, {q14}, d5
@ Write to q1 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
vtbl.8 d3, {q0}, d11
@ Here we restore the original q0/q1 usage. This instruction is
@ reordered from the ARMv8 version so we do not clobber the vtbl.8
@ below.
veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
vtbl.8 d3, {q15}, d7
vext.8 q5, q5, q5, #12 @ vpalignr $12, %xmm5, %xmm5, %xmm5
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
subs r8, r8, #1 @ sub $1,%rax # nr--
Ldec_entry:
@ top of round
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
vtbl.8 d5, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
vtbl.8 d5, {q10}, d7
vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
vtbl.8 d7, {q10}, d9
veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
vld1.64 {q0}, [r9]! @ vmovdqu (%r9), %xmm0
bne Ldec_loop
@ middle of last round
adr r10, Lk_dsbo
@ Write to q1 rather than q4 to avoid overlapping table and destination.
vld1.64 {q1}, [r10]! @ vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
vtbl.8 d9, {q1}, d5
@ Write to q2 rather than q1 to avoid overlapping table and destination.
vld1.64 {q2}, [r10] @ vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
vtbl.8 d2, {q2}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
vtbl.8 d3, {q2}, d7
vld1.64 {q2}, [r11] @ vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160
veor q4, q4, q0 @ vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
@ Write to q1 rather than q0 so the table and destination registers
@ below do not overlap.
veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm0 # 0 = A
vtbl.8 d0, {q1}, d4 @ vpshufb %xmm2, %xmm0, %xmm0
vtbl.8 d1, {q1}, d5
bx lr
.globl _vpaes_decrypt
.private_extern _vpaes_decrypt
#ifdef __thumb2__
.thumb_func _vpaes_decrypt
#endif
.align 4
_vpaes_decrypt:
@ _vpaes_decrypt_core uses r7-r11.
stmdb sp!, {r7,r8,r9,r10,r11,lr}
@ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11}
vld1.64 {q0}, [r0]
bl _vpaes_preheat
bl _vpaes_decrypt_core
vst1.64 {q0}, [r1]
vldmia sp!, {d8,d9,d10,d11}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@ @@
@@ AES key schedule @@
@@ @@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This function diverges from both x86_64 and aarch64 in which constants are
@ pinned. x86_64 has a common preheat function for all operations. aarch64
@ separates them because it has enough registers to pin nearly all constants.
@ armv7 does not have enough registers, but needing explicit loads and stores
@ also complicates using x86_64's register allocation directly.
@
@ We pin some constants for convenience and leave q14 and q15 free to load
@ others on demand.
@
@ Key schedule constants
@
.align 4
_vpaes_key_consts:
Lk_dksd:@ decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb:@ decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse:@ decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9:@ decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
Lk_rcon:@ rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
Lk_opt:@ output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew:@ deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
#ifdef __thumb2__
.thumb_func _vpaes_key_preheat
#endif
.align 4
_vpaes_key_preheat:
adr r11, Lk_rcon
vmov.i8 q12, #0x5b @ Lk_s63
adr r10, Lk_inv @ Must be aligned to 8 mod 16.
vmov.i8 q9, #0x0f @ Lk_s0F
vld1.64 {q10,q11}, [r10] @ Lk_inv
vld1.64 {q8}, [r11] @ Lk_rcon
bx lr
#ifdef __thumb2__
.thumb_func _vpaes_schedule_core
#endif
.align 4
_vpaes_schedule_core:
@ We only need to save lr, but ARM requires an 8-byte stack alignment,
@ so save an extra register.
stmdb sp!, {r3,lr}
bl _vpaes_key_preheat @ load the tables
adr r11, Lk_ipt @ Must be aligned to 8 mod 16.
vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned)
@ input transform
@ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
@ overlap table and destination.
vmov q4, q0 @ vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
adr r10, Lk_sr @ Must be aligned to 8 mod 16.
vmov q7, q0 @ vmovdqa %xmm0, %xmm7
add r8, r8, r10
tst r3, r3
bne Lschedule_am_decrypting
@ encrypting, output zeroth round key after transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx)
b Lschedule_go
Lschedule_am_decrypting:
@ decrypting, output zeroth round key after shiftrows
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
vtbl.8 d6, {q4}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q4}, d3
vst1.64 {q3}, [r2] @ vmovdqu %xmm3, (%rdx)
eor r8, r8, #0x30 @ xor $0x30, %r8
Lschedule_go:
cmp r1, #192 @ cmp $192, %esi
bhi Lschedule_256
beq Lschedule_192
@ 128: fall through
@@
@@ .schedule_128
@@
@@ 128-bit specific part of key schedule.
@@
@@ This schedule is really simple, because all its parts
@@ are accomplished by the subroutines.
@@
Lschedule_128:
mov r0, #10 @ mov $10, %esi
Loop_schedule_128:
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq Lschedule_mangle_last
bl _vpaes_schedule_mangle @ write output
b Loop_schedule_128
@@
@@ .aes_schedule_192
@@
@@ 192-bit specific part of key schedule.
@@
@@ The main body of this schedule is the same as the 128-bit
@@ schedule, but with more smearing. The long, high side is
@@ stored in q7 as before, and the short, low side is in
@@ the high bits of q6.
@@
@@ This schedule is somewhat nastier, however, because each
@@ round produces 192 bits of key material, or 1.5 round keys.
@@ Therefore, on each cycle we do 2 rounds and produce 3 round
@@ keys.
@@
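@@ A quick count, as a sanity check: AES-192 has 12 rounds and thus needs
@@ 13 round keys. Round key 0 is stored before .Lschedule_go branches here;
@@ each of the first 3 trips through the loop below saves keys n, n+1 and
@@ n+2; the 4th trip saves two keys and falls into .Lschedule_mangle_last
@@ for the last one: 1 + 3*3 + 2 + 1 = 13.
@@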
.align 4
Lschedule_192:
sub r0, r0, #8
vld1.64 {q0}, [r0] @ vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform @ input transform
vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save short part
vmov.i8 d12, #0 @ vpxor %xmm4, %xmm4, %xmm4 # clear 4
@ vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov r0, #4 @ mov $4, %esi
Loop_schedule_192:
bl _vpaes_schedule_round
vext.8 q0, q6, q0, #8 @ vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle @ save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle @ save key n+1
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq Lschedule_mangle_last
bl _vpaes_schedule_mangle @ save key n+2
bl _vpaes_schedule_192_smear
b Loop_schedule_192
@@
@@ .aes_schedule_256
@@
@@ 256-bit specific part of key schedule.
@@
@@ The structure here is very similar to the 128-bit
@@ schedule, but with an additional "low side" in
@@ q6. The low side's rounds are the same as the
@@ high side's, except no rcon and no rotation.
@@
.align 4
Lschedule_256:
vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform @ input transform
mov r0, #7 @ mov $7, %esi
Loop_schedule_256:
bl _vpaes_schedule_mangle @ output low result
vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
@ high round
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq Lschedule_mangle_last
bl _vpaes_schedule_mangle
@ low round. swap xmm7 and xmm6
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vmov.i8 q4, #0
vmov q5, q7 @ vmovdqa %xmm7, %xmm5
vmov q7, q6 @ vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
vmov q7, q5 @ vmovdqa %xmm5, %xmm7
b Loop_schedule_256
@@
@@ .aes_schedule_mangle_last
@@
@@ Mangler for last round of key schedule
@@ Mangles q0
@@ when encrypting, outputs out(q0) ^ 63
@@ when decrypting, outputs unskew(q0)
@@
@@ Always called right before return... jumps to cleanup and exits
@@
.align 4
Lschedule_mangle_last:
@ schedule last round key from xmm0
adr r11, Lk_deskew @ lea Lk_deskew(%rip),%r11 # prepare to deskew
tst r3, r3
bne Lschedule_mangle_last_dec
@ encrypting
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1
adr r11, Lk_opt @ lea Lk_opt(%rip), %r11 # prepare to output transform
add r2, r2, #32 @ add $32, %rdx
vmov q2, q0
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute
vtbl.8 d1, {q2}, d3
Lschedule_mangle_last_dec:
sub r2, r2, #16 @ add $-16, %rdx
veor q0, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform @ output transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key
@ cleanup
veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0
veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2
veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3
veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4
veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5
veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6
veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7
ldmia sp!, {r3,pc} @ return
@@
@@ .aes_schedule_192_smear
@@
@@ Smear the short, low side in the 192-bit key schedule.
@@
@@ Inputs:
@@ q7: high side, b a x y
@@ q6: low side, d c 0 0
@@
@@ Outputs:
@@ q6: b+c+d b+c 0 0
@@ q0: b+c+d b+c b a
@@
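@@ A minimal Python model of the smear (illustrative only: "+" above is XOR
@@ and lists here are dwords low-to-high, so "b a x y" reads [y, x, a, b]):
@@
@@   def smear_192(q7, q6):
@@       s = [q7[2], q7[3], q7[3], q7[3]]  # vpshufd $0xFE: b a _ _ -> b b b a
@@       t = [0, 0, 0, q6[2]]              # vpshufd $0x80: d c 0 0 -> c 0 0 0
@@       q6 = [w ^ u ^ v for w, u, v in zip(q6, t, s)]
@@       q0 = list(q6)                     # -> b+c+d b+c b a
@@       q6[0] = q6[1] = 0                 # clobber low side with zeros
@@       return q0, q6                     # -> q6 = b+c+d b+c 0 0
@@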
#ifdef __thumb2__
.thumb_func _vpaes_schedule_192_smear
#endif
.align 4
_vpaes_schedule_192_smear:
vmov.i8 q1, #0
vdup.32 q0, d15[1]
vshl.i64 q1, q6, #32 @ vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
vmov d0, d15 @ vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
veor q6, q6, q1 @ vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
veor q6, q6, q0 @ vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
vmov q0, q6 @ vmovdqa %xmm6, %xmm0
vmov d12, d2 @ vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
bx lr
@@
@@ .aes_schedule_round
@@
@@ Runs one main round of the key schedule on q0, q7
@@
@@ Specifically, runs subbytes on the high dword of q0
@@ then rotates it by one byte and xors into the low dword of
@@ q7.
@@
@@ Adds rcon from low byte of q8, then rotates q8 for
@@ next rcon.
@@
@@ Smears the dwords of q7 by xoring the low into the
@@ second low, result into third, result into highest.
@@
@@ Returns results in q7 = q0.
@@ Clobbers q1-q4, r11.
@@
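@@ For reference, a sketch of the standard FIPS-197 recurrence this round
@@ implements (in vpaes's transformed basis; `sbox` and `rcon` stand in for
@@ the AES S-box table and round constant, which are not defined here). The
@@ code below substitutes bytes first and rotates second, which is
@@ equivalent since SubBytes acts bytewise:
@@
@@   def next_round_key(prev, rcon, sbox):   # prev: four 4-byte words
@@       t = prev[3][1:] + prev[3][:1]       # RotWord
@@       t = bytes(sbox[b] for b in t)       # SubWord
@@       t = bytes([t[0] ^ rcon]) + t[1:]    # add rcon to the low byte
@@       w = [bytes(a ^ b for a, b in zip(prev[0], t))]
@@       for i in range(1, 4):               # the smear: w[i] = prev[i]+w[i-1]
@@           w.append(bytes(a ^ b for a, b in zip(prev[i], w[i - 1])))
@@       return w
@@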
#ifdef __thumb2__
.thumb_func _vpaes_schedule_round
#endif
.align 4
_vpaes_schedule_round:
@ extract rcon from xmm8
vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4
vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1
vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
@ rotate
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0
@ fall through...
@ low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
@ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
@ We pin other values in _vpaes_key_preheat, so load them now.
adr r11, Lk_sb1
vld1.64 {q14,q15}, [r11]
@ smear xmm7
vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4
@ subbytes
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7
vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
vtbl.8 d5, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q7, q7, q12 @ vpxor Lk_s63(%rip), %xmm7, %xmm7
vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
vtbl.8 d7, {q10}, d7
veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
vtbl.8 d5, {q10}, d9
veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io
veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
vtbl.8 d9, {q15}, d7
vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
vtbl.8 d3, {q14}, d5
veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
@ add in smeared stuff
veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0
veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7
bx lr
@@
@@ .aes_schedule_transform
@@
@@ Linear-transform q0 according to tables at [r11]
@@
@@ Requires that q9 = 0x0F0F... as in preheat
@@ Output in q0
@@ Clobbers q1, q2, q14, q15
@@
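@@ A sketch of the transform in Python (illustrative; `lo` and `hi` mirror
@@ the two 16-byte tables loaded from [r11] into q14/q15):
@@
@@   def schedule_transform(block, lo, hi):
@@       return bytes(lo[b & 0x0F] ^ hi[b >> 4] for b in block)
@@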
#ifdef __thumb2__
.thumb_func _vpaes_schedule_transform
#endif
.align 4
_vpaes_schedule_transform:
vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo
@ vmovdqa 16(%r11), %xmm1 # hi
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2
vtbl.8 d5, {q14}, d3
vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0
vtbl.8 d1, {q15}, d1
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
bx lr
@@
@@ .aes_schedule_mangle
@@
@@ Mangles q0 from (basis-transformed) standard version
@@ to our version.
@@
@@ On encrypt,
@@ xor with 0x63
@@ multiply by circulant 0,1,1,1
@@ apply shiftrows transform
@@
@@ On decrypt,
@@ xor with 0x63
@@ multiply by "inverse mixcolumns" circulant E,B,D,9
@@ deskew
@@ apply shiftrows transform
@@
@@
@@ Writes out to [r2], and increments or decrements it
@@ Keeps track of round number mod 4 in r8
@@ Preserves q0
@@ Clobbers q1-q5
@@
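@@ A minimal Python model of the encrypt-side mangle (illustrative; `sr` is
@@ one of the four Lk_sr byte permutations, and 0x5B is Lk_s63, i.e. 0x63
@@ seen through the basis change):
@@
@@   def rot_lanes(x):   # Lk_mc_forward: rotate each 4-byte lane by a byte
@@       return bytes(x[4 * (i // 4) + (i + 1) % 4] for i in range(16))
@@
@@   def mangle_enc(key, sr):
@@       x = bytes(b ^ 0x5B for b in key)
@@       s1 = rot_lanes(x); s2 = rot_lanes(s1); s3 = rot_lanes(s2)
@@       y = bytes(a ^ b ^ c for a, b, c in zip(s1, s2, s3))  # 0,1,1,1
@@       return bytes(y[i] for i in sr)
@@
@@ Because the lane rotation S satisfies S^4 = I, the circulant S+S^2+S^3
@@ is its own inverse over GF(2), which _vpaes_encrypt_key_to_bsaes below
@@ relies on.
@@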
#ifdef __thumb2__
.thumb_func _vpaes_schedule_mangle
#endif
.align 4
_vpaes_schedule_mangle:
tst r3, r3
vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later
adr r11, Lk_mc_forward @ Must be aligned to 8 mod 16.
vld1.64 {q5}, [r11] @ vmovdqa Lk_mc_forward(%rip),%xmm5
bne Lschedule_mangle_dec
@ encrypting
@ Write to q2 so we do not overlap table and destination below.
veor q2, q0, q12 @ vpxor Lk_s63(%rip), %xmm0, %xmm4
add r2, r2, #16 @ add $16, %rdx
vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4
vtbl.8 d9, {q2}, d11
vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1
vtbl.8 d3, {q4}, d11
vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3
vtbl.8 d7, {q1}, d11
veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3
b Lschedule_mangle_both
.align 4
Lschedule_mangle_dec:
@ inverse mix columns
adr r11, Lk_dksd @ lea Lk_dksd(%rip),%r11
vshr.u8 q1, q4, #4 @ vpsrlb $4, %xmm4, %xmm1 # 1 = hi
vand q4, q4, q9 @ vpand %xmm9, %xmm4, %xmm4 # 4 = lo
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x00(%r11), %xmm2
@ vmovdqa 0x10(%r11), %xmm3
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q15}, d3
@ Load .Lk_dksb ahead of time.
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x20(%r11), %xmm2
@ vmovdqa 0x30(%r11), %xmm3
@ Write to q13 so we do not overlap table and destination.
veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3
vtbl.8 d7, {q13}, d11
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q15}, d3
@ Load .Lk_dkse ahead of time.
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x40(%r11), %xmm2
@ vmovdqa 0x50(%r11), %xmm3
@ Write to q13 so we do not overlap table and destination.
veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3
vtbl.8 d7, {q13}, d11
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q15}, d3
@ Load .Lk_dks9 ahead of time.
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x60(%r11), %xmm2
@ vmovdqa 0x70(%r11), %xmm4
@ Write to q13 so we do not overlap table and destination.
veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3
vtbl.8 d7, {q13}, d11
vtbl.8 d8, {q15}, d2 @ vpshufb %xmm1, %xmm4, %xmm4
vtbl.8 d9, {q15}, d3
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
veor q3, q4, q2 @ vpxor %xmm2, %xmm4, %xmm3
sub r2, r2, #16 @ add $-16, %rdx
Lschedule_mangle_both:
@ Write to q2 so table and destination do not overlap.
vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d5, {q3}, d3
add r8, r8, #64-16 @ add $-16, %r8
and r8, r8, #~(1<<6) @ and $0x30, %r8
vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx)
bx lr
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
#ifdef __thumb2__
.thumb_func _vpaes_set_encrypt_key
#endif
.align 4
_vpaes_set_encrypt_key:
stmdb sp!, {r7,r8,r9,r10,r11, lr}
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
lsr r9, r1, #5 @ shr $5,%eax
add r9, r9, #5 @ add $5,%eax
str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov r3, #0 @ mov $0,%ecx
mov r8, #0x30 @ mov $0x30,%r8d
bl _vpaes_schedule_core
eor r0, r0, r0
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.globl _vpaes_set_decrypt_key
.private_extern _vpaes_set_decrypt_key
#ifdef __thumb2__
.thumb_func _vpaes_set_decrypt_key
#endif
.align 4
_vpaes_set_decrypt_key:
stmdb sp!, {r7,r8,r9,r10,r11, lr}
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
lsr r9, r1, #5 @ shr $5,%eax
add r9, r9, #5 @ add $5,%eax
str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
lsl r9, r9, #4 @ shl $4,%eax
add r2, r2, #16 @ lea 16(%rdx,%rax),%rdx
add r2, r2, r9
mov r3, #1 @ mov $1,%ecx
lsr r8, r1, #1 @ shr $1,%r8d
and r8, r8, #32 @ and $32,%r8d
eor r8, r8, #32 @ xor $32,%r8d # nbits==192?0:32
bl _vpaes_schedule_core
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
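@ A quick check of the r8 computation above (the initial Lk_sr offset),
@ runnable as Python:
@
@   for nbits in (128, 192, 256):
@       assert ((nbits >> 1) & 32) ^ 32 == (0 if nbits == 192 else 32)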
@ Additional constants for converting to bsaes.
.align 4
_vpaes_convert_consts:
@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
@ transform in the AES S-box. 0x63 is incorporated into the low half of the
@ table. This was computed with the following script:
@
@ def u64s_to_u128(x, y):
@ return x | (y << 64)
@ def u128_to_u64s(w):
@ return w & ((1<<64)-1), w >> 64
@ def get_byte(w, i):
@ return (w >> (i*8)) & 0xff
@ def apply_table(table, b):
@ lo = b & 0xf
@ hi = b >> 4
@ return get_byte(table[0], lo) ^ get_byte(table[1], hi)
@ def opt(b):
@ table = [
@ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
@ ]
@ return apply_table(table, b)
@ def rot_byte(b, n):
@ return 0xff & ((b << n) | (b >> (8-n)))
@ def skew(x):
@ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
@ rot_byte(x, 4))
@ table = [0, 0]
@ for i in range(16):
@ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
@ table[1] |= skew(opt(i<<4)) << (i*8)
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0]))
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1]))
Lk_opt_then_skew:
.quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b
.quad 0x1f30062936192f00, 0xb49bad829db284ab
@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation
@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344
@ becomes 0x22334411 and then 0x11443322.
Lk_decrypt_transform:
.quad 0x0704050603000102, 0x0f0c0d0e0b08090a
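@ The permutation spelled out, with the worked example above checked in
@ Python (byte i of the result is byte perm[i] of the input):
@
@   perm = [2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15]
@   x = (0x11223344).to_bytes(4, "little") * 4
@   y = bytes(x[i] for i in perm)
@   assert int.from_bytes(y[:4], "little") == 0x11443322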
@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl _vpaes_encrypt_key_to_bsaes
.private_extern _vpaes_encrypt_key_to_bsaes
#ifdef __thumb2__
.thumb_func _vpaes_encrypt_key_to_bsaes
#endif
.align 4
_vpaes_encrypt_key_to_bsaes:
stmdb sp!, {r11, lr}
@ See _vpaes_schedule_core for the key schedule logic. In particular,
@ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
@ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
@ contain the transformations not in the bsaes representation. This
@ function inverts those transforms.
@
@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
@ representation, which does not match the other aes_nohw_*
@ implementations. The ARM aes_nohw_* stores each 32-bit word
@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
@ cost of extra REV and VREV32 operations in little-endian ARM.
vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform
adr r2, Lk_mc_forward @ Must be aligned to 8 mod 16.
add r3, r2, 0x90 @ Lk_sr+0x10-Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)
vld1.64 {q12}, [r2]
vmov.i8 q10, #0x5b @ Lk_s63 from vpaes-x86_64
adr r11, Lk_opt @ Must be aligned to 8 mod 16.
vmov.i8 q11, #0x63 @ LK_s63 without Lk_ipt applied
@ vpaes stores one fewer round count than bsaes, but the number of keys
@ is the same.
ldr r2, [r1,#240]
add r2, r2, #1
str r2, [r0,#240]
@ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
@ Invert this with .Lk_opt.
vld1.64 {q0}, [r1]!
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
@ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
@ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
Loop_enc_key_to_bsaes:
vld1.64 {q0}, [r1]!
@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
@ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
@ We use r3 rather than r8 to avoid a callee-saved register.
vld1.64 {q1}, [r3]
vtbl.8 d4, {q0}, d2
vtbl.8 d5, {q0}, d3
add r3, r3, #16
and r3, r3, #~(1<<6)
vmov q0, q2
@ Handle the last key differently.
subs r2, r2, #1
beq Loop_enc_key_to_bsaes_last
@ Multiply by the circulant. This is its own inverse.
vtbl.8 d2, {q0}, d24
vtbl.8 d3, {q0}, d25
vmov q0, q1
vtbl.8 d4, {q1}, d24
vtbl.8 d5, {q1}, d25
veor q0, q0, q2
vtbl.8 d2, {q2}, d24
vtbl.8 d3, {q2}, d25
veor q0, q0, q1
@ XOR and finish.
veor q0, q0, q10
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
b Loop_enc_key_to_bsaes
Loop_enc_key_to_bsaes_last:
@ The final key does not have a basis transform (note
@ .Lschedule_mangle_last inverts the original transform). It only XORs
@ 0x63 and applies ShiftRows. The latter was already inverted in the
@ loop. Note that, because we act on the original representation, we use
@ q11, not q10.
veor q0, q0, q11
vrev32.8 q0, q0
vst1.64 {q0}, [r0]
@ Wipe registers which contained key material.
veor q0, q0, q0
veor q1, q1, q1
veor q2, q2, q2
ldmia sp!, {r11, pc} @ return
@ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes);
.globl _vpaes_decrypt_key_to_bsaes
.private_extern _vpaes_decrypt_key_to_bsaes
#ifdef __thumb2__
.thumb_func _vpaes_decrypt_key_to_bsaes
#endif
.align 4
_vpaes_decrypt_key_to_bsaes:
stmdb sp!, {r11, lr}
@ See _vpaes_schedule_core for the key schedule logic. Note vpaes
@ computes the decryption key schedule in reverse. Additionally,
@ aes-x86_64.pl shares some transformations, so we must only partially
@ invert vpaes's transformations. In general, vpaes computes in a
@ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of
@ MixColumns, ShiftRows, and the affine part of the AES S-box (which is
@ split into a linear skew and XOR of 0x63). We undo all but MixColumns.
@
@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
@ representation, which does not match the other aes_nohw_*
@ implementations. The ARM aes_nohw_* stores each 32-bit word
@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
@ cost of extra REV and VREV32 operations in little-endian ARM.
adr r2, Lk_decrypt_transform
adr r3, Lk_sr+0x30
adr r11, Lk_opt_then_skew @ Input to _vpaes_schedule_transform.
vld1.64 {q12}, [r2] @ Reuse q12 from encryption.
vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform
@ vpaes stores one fewer round count than bsaes, but the number of keys
@ is the same.
ldr r2, [r1,#240]
add r2, r2, #1
str r2, [r0,#240]
@ Undo the basis change and reapply the S-box affine transform. See
@ .Lschedule_mangle_last.
vld1.64 {q0}, [r1]!
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ See _vpaes_schedule_mangle for the transform on the middle keys. Note
@ it simultaneously inverts MixColumns and the S-box affine transform.
@ See .Lk_dksd through .Lk_dks9.
Loop_dec_key_to_bsaes:
vld1.64 {q0}, [r1]!
@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note going
@ forwards cancels inverting for which direction we cycle r3. We use r3
@ rather than r8 to avoid a callee-saved register.
vld1.64 {q1}, [r3]
vtbl.8 d4, {q0}, d2
vtbl.8 d5, {q0}, d3
add r3, r3, #64-16
and r3, r3, #~(1<<6)
vmov q0, q2
@ Handle the last key differently.
subs r2, r2, #1
beq Loop_dec_key_to_bsaes_last
@ Undo the basis change and reapply the S-box affine transform.
bl _vpaes_schedule_transform
@ Rotate each word by 8 bytes (cycle the rows) and then byte-swap. We
@ combine the two operations in .Lk_decrypt_transform.
@
@ TODO(davidben): Where does the rotation come from?
vtbl.8 d2, {q0}, d24
vtbl.8 d3, {q0}, d25
vst1.64 {q1}, [r0]!
b Loop_dec_key_to_bsaes
Loop_dec_key_to_bsaes_last:
@ The final key only inverts ShiftRows (already done in the loop). See
@ .Lschedule_am_decrypting. Its basis is not transformed.
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ Wipe registers which contained key material.
veor q0, q0, q0
veor q1, q1, q1
veor q2, q2, q2
ldmia sp!, {r11, pc} @ return
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
#ifdef __thumb2__
.thumb_func _vpaes_ctr32_encrypt_blocks
#endif
.align 4
_vpaes_ctr32_encrypt_blocks:
mov ip, sp
stmdb sp!, {r7,r8,r9,r10,r11, lr}
@ This function uses q4-q7 (d8-d15), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
cmp r2, #0
@ r8 is passed on the stack.
ldr r8, [ip]
beq Lctr32_done
@ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
mov r9, r3
mov r3, r2
mov r2, r9
@ Load the IV and counter portion.
ldr r7, [r8, #12]
vld1.8 {q7}, [r8]
bl _vpaes_preheat
rev r7, r7 @ The counter is big-endian.
Lctr32_loop:
vmov q0, q7
vld1.8 {q6}, [r0]! @ Load input ahead of time
bl _vpaes_encrypt_core
veor q0, q0, q6 @ XOR input and result
vst1.8 {q0}, [r1]!
subs r3, r3, #1
@ Update the counter.
add r7, r7, #1
rev r9, r7
vmov.32 d15[1], r9
bne Lctr32_loop
Lctr32_done:
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
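@ A minimal Python model of the loop above (illustrative; `block_cipher`
@ stands in for _vpaes_encrypt_core with the scheduled key):
@
@   def ctr32_encrypt(blocks, iv, block_cipher):
@       out, ctr = [], int.from_bytes(iv[12:], "big")
@       for blk in blocks:                   # blk: one 16-byte block
@           ks = block_cipher(iv[:12] + ctr.to_bytes(4, "big"))
@           out.append(bytes(a ^ b for a, b in zip(blk, ks)))
@           ctr = (ctr + 1) & 0xFFFFFFFF     # only the low 32 bits count up
@       return b"".join(out)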
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/armv4-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
#include <openssl/arm_arch.h>
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.globl _bn_mul_mont_nohw
.private_extern _bn_mul_mont_nohw
#ifdef __thumb2__
.thumb_func _bn_mul_mont_nohw
#endif
.align 5
_bn_mul_mont_nohw:
ldr ip,[sp,#4] @ load num
stmdb sp!,{r0,r2} @ sp points at argument block
cmp ip,#2
mov r0,ip @ load num
#ifdef __thumb2__
ittt lt
#endif
movlt r0,#0
addlt sp,sp,#2*4
blt Labrt
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
mov r0,r0,lsl#2 @ rescale r0 for byte count
sub sp,sp,r0 @ alloca(4*num)
sub sp,sp,#4 @ +extra dword
sub r0,r0,#4 @ "num=num-1"
add r4,r2,r0 @ &bp[num-1]
add r0,sp,r0 @ r0 to point at &tp[num-1]
ldr r8,[r0,#14*4] @ &n0
ldr r2,[r2] @ bp[0]
ldr r5,[r1],#4 @ ap[0],ap++
ldr r6,[r3],#4 @ np[0],np++
ldr r8,[r8] @ *n0
str r4,[r0,#15*4] @ save &bp[num]
umull r10,r11,r5,r2 @ ap[0]*bp[0]
str r8,[r0,#14*4] @ save n0 value
mul r8,r10,r8 @ "tp[0]"*n0
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
mov r4,sp
L1st:
ldr r5,[r1],#4 @ ap[j],ap++
mov r10,r11
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[0]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne L1st
adds r12,r12,r11
ldr r4,[r0,#13*4] @ restore bp
mov r14,#0
ldr r8,[r0,#14*4] @ restore n0
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
mov r7,sp
str r14,[r0,#4] @ tp[num]=
Louter:
sub r7,r0,r7 @ "original" r0-1 value
sub r1,r1,r7 @ "rewind" ap to &ap[1]
ldr r2,[r4,#4]! @ *(++bp)
sub r3,r3,r7 @ "rewind" np to &np[1]
ldr r5,[r1,#-4] @ ap[0]
ldr r10,[sp] @ tp[0]
ldr r6,[r3,#-4] @ np[0]
ldr r7,[sp,#4] @ tp[1]
mov r11,#0
umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
str r4,[r0,#13*4] @ save bp
mul r8,r10,r8
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
mov r4,sp
Linner:
ldr r5,[r1],#4 @ ap[j],ap++
adds r10,r11,r7 @ +=tp[j]
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[i]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adc r11,r11,#0
ldr r7,[r4,#8] @ tp[j+1]
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne Linner
adds r12,r12,r11
mov r14,#0
ldr r4,[r0,#13*4] @ restore bp
adc r14,r14,#0
ldr r8,[r0,#14*4] @ restore n0
adds r12,r12,r7
ldr r7,[r0,#15*4] @ restore &bp[num]
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
str r14,[r0,#4] @ tp[num]=
cmp r4,r7
#ifdef __thumb2__
itt ne
#endif
movne r7,sp
bne Louter
ldr r2,[r0,#12*4] @ pull rp
mov r5,sp
add r0,r0,#4 @ r0 to point at &tp[num]
sub r5,r0,r5 @ "original" num value
mov r4,sp @ "rewind" r4
mov r1,r4 @ "borrow" r1
sub r3,r3,r5 @ "rewind" r3 to &np[0]
subs r7,r7,r7 @ "clear" carry flag
Lsub: ldr r7,[r4],#4
ldr r6,[r3],#4
sbcs r7,r7,r6 @ tp[j]-np[j]
str r7,[r2],#4 @ rp[j]=
teq r4,r0 @ preserve carry
bne Lsub
sbcs r14,r14,#0 @ upmost carry
mov r4,sp @ "rewind" r4
sub r2,r2,r5 @ "rewind" r2
Lcopy: ldr r7,[r4] @ conditional copy
ldr r5,[r2]
str sp,[r4],#4 @ zap tp
#ifdef __thumb2__
it cc
#endif
movcc r5,r7
str r5,[r2],#4
teq r4,r0 @ preserve carry
bne Lcopy
mov sp,r0
add sp,sp,#4 @ skip over tp[num+1]
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
add sp,sp,#2*4 @ skip over {r0,r2}
mov r0,#1
Labrt:
#if __ARM_ARCH>=5
bx lr @ bx lr
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
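@ A minimal Python model of the word-serial Montgomery loop above
@ (illustrative; ap/bp/np are num-word little-endian arrays and n0 is
@ -np[0]^-1 mod 2^32, as the caller provides). The result is
@ ap*bp*R^-num mod np for R = 2^32:
@
@   def mont_mul(ap, bp, np, n0, num, R=1 << 32):
@       A = sum(a << (32 * i) for i, a in enumerate(ap))
@       N = sum(n << (32 * i) for i, n in enumerate(np))
@       t = 0
@       for i in range(num):            # one trip through Louter per b word
@           t += bp[i] * A              # the ap[j]*bp[i] inner loop
@           m = ((t % R) * n0) % R      # mul r8,r10,r8: "tp[0]"*n0
@           t += m * N                  # the np[j]*m inner loop
@           t //= R                     # exact: t + m*N == 0 (mod 2^32)
@       return t - N if t >= N else t   # Lsub/Lcopy: conditional subtract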
#if __ARM_MAX_ARCH__>=7
.globl _bn_mul8x_mont_neon
.private_extern _bn_mul8x_mont_neon
#ifdef __thumb2__
.thumb_func _bn_mul8x_mont_neon
#endif
.align 5
_bn_mul8x_mont_neon:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load rest of parameter block
mov ip,sp
cmp r5,#8
bhi LNEON_8n
@ special case for r5==8, everything is in register bank...
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
sub r7,sp,r5,lsl#4
vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-(
and r7,r7,#-64
vld1.32 {d30[0]}, [r4,:32]
mov sp,r7 @ alloca
vzip.16 d28,d8
vmull.u32 q6,d28,d0[0]
vmull.u32 q7,d28,d0[1]
vmull.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmull.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
vmul.u32 d29,d29,d30
vmull.u32 q10,d28,d2[0]
vld1.32 {d4,d5,d6,d7}, [r3]!
vmull.u32 q11,d28,d2[1]
vmull.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmull.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
sub r9,r5,#1
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
b LNEON_outer8
.align 4
LNEON_outer8:
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
vzip.16 d28,d8
vadd.u64 d12,d12,d10
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
subs r9,r9,#1
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
bne LNEON_outer8
vadd.u64 d12,d12,d10
mov r7,sp
vshr.u64 d10,d12,#16
mov r8,r5
vadd.u64 d13,d13,d10
add r6,sp,#96
vshr.u64 d10,d13,#16
vzip.16 d12,d13
b LNEON_tail_entry
.align 4
LNEON_8n:
veor q6,q6,q6
sub r7,sp,#128
veor q7,q7,q7
sub r7,r7,r5,lsl#4
veor q8,q8,q8
and r7,r7,#-64
veor q9,q9,q9
mov sp,r7 @ alloca
veor q10,q10,q10
add r7,r7,#256
veor q11,q11,q11
sub r8,r5,#8
veor q12,q12,q12
veor q13,q13,q13
LNEON_8n_init:
vst1.64 {q6,q7},[r7,:256]!
subs r8,r8,#8
vst1.64 {q8,q9},[r7,:256]!
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12,q13},[r7,:256]!
bne LNEON_8n_init
add r6,sp,#256
vld1.32 {d0,d1,d2,d3},[r1]!
add r10,sp,#8
vld1.32 {d30[0]},[r4,:32]
mov r9,r5
b LNEON_8n_outer
.align 4
LNEON_8n_outer:
vld1.32 {d28[0]},[r2,:32]! @ *b++
veor d8,d8,d8
vzip.16 d28,d8
add r7,sp,#128
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
vmlal.u32 q10,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q11,d28,d2[1]
vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q6,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q7,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q8,d29,d5[0]
vshr.u64 d12,d12,#16
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vadd.u64 d12,d12,d13
vmlal.u32 q11,d29,d6[1]
vshr.u64 d12,d12,#16
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vadd.u64 d14,d14,d12
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0]
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]!
vmlal.u32 q8,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q9,d28,d1[0]
vshl.i64 d29,d15,#16
vmlal.u32 q10,d28,d1[1]
vadd.u64 d29,d29,d14
vmlal.u32 q11,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q12,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1]
vmlal.u32 q13,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q7,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q8,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q9,d29,d5[0]
vshr.u64 d14,d14,#16
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vadd.u64 d14,d14,d15
vmlal.u32 q12,d29,d6[1]
vshr.u64 d14,d14,#16
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vadd.u64 d16,d16,d14
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1]
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]!
vmlal.u32 q9,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q10,d28,d1[0]
vshl.i64 d29,d17,#16
vmlal.u32 q11,d28,d1[1]
vadd.u64 d29,d29,d16
vmlal.u32 q12,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q13,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2]
vmlal.u32 q6,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q8,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q9,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q10,d29,d5[0]
vshr.u64 d16,d16,#16
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vadd.u64 d16,d16,d17
vmlal.u32 q13,d29,d6[1]
vshr.u64 d16,d16,#16
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vadd.u64 d18,d18,d16
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2]
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]!
vmlal.u32 q10,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q11,d28,d1[0]
vshl.i64 d29,d19,#16
vmlal.u32 q12,d28,d1[1]
vadd.u64 d29,d29,d18
vmlal.u32 q13,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q6,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3]
vmlal.u32 q7,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q9,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q10,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q11,d29,d5[0]
vshr.u64 d18,d18,#16
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vadd.u64 d18,d18,d19
vmlal.u32 q6,d29,d6[1]
vshr.u64 d18,d18,#16
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vadd.u64 d20,d20,d18
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3]
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]!
vmlal.u32 q11,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q12,d28,d1[0]
vshl.i64 d29,d21,#16
vmlal.u32 q13,d28,d1[1]
vadd.u64 d29,d29,d20
vmlal.u32 q6,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q7,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4]
vmlal.u32 q8,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q10,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q11,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q12,d29,d5[0]
vshr.u64 d20,d20,#16
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vadd.u64 d20,d20,d21
vmlal.u32 q7,d29,d6[1]
vshr.u64 d20,d20,#16
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vadd.u64 d22,d22,d20
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4]
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]!
vmlal.u32 q12,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q13,d28,d1[0]
vshl.i64 d29,d23,#16
vmlal.u32 q6,d28,d1[1]
vadd.u64 d29,d29,d22
vmlal.u32 q7,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q8,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5]
vmlal.u32 q9,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q11,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q12,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q13,d29,d5[0]
vshr.u64 d22,d22,#16
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vadd.u64 d22,d22,d23
vmlal.u32 q8,d29,d6[1]
vshr.u64 d22,d22,#16
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vadd.u64 d24,d24,d22
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5]
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]!
vmlal.u32 q13,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q6,d28,d1[0]
vshl.i64 d29,d25,#16
vmlal.u32 q7,d28,d1[1]
vadd.u64 d29,d29,d24
vmlal.u32 q8,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q9,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6]
vmlal.u32 q10,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q12,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q13,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q6,d29,d5[0]
vshr.u64 d24,d24,#16
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vadd.u64 d24,d24,d25
vmlal.u32 q9,d29,d6[1]
vshr.u64 d24,d24,#16
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vadd.u64 d26,d26,d24
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6]
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]!
vmlal.u32 q6,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q7,d28,d1[0]
vshl.i64 d29,d27,#16
vmlal.u32 q8,d28,d1[1]
vadd.u64 d29,d29,d26
vmlal.u32 q9,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7]
vmlal.u32 q11,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q12,d28,d3[1]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q13,d29,d4[0]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q6,d29,d4[1]
vmlal.u32 q7,d29,d5[0]
vshr.u64 d26,d26,#16
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vadd.u64 d26,d26,d27
vmlal.u32 q10,d29,d6[1]
vshr.u64 d26,d26,#16
vmlal.u32 q11,d29,d7[0]
vmlal.u32 q12,d29,d7[1]
vadd.u64 d12,d12,d26
vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7]
add r10,sp,#8 @ rewind
sub r8,r5,#8
b LNEON_8n_inner
.align 4
LNEON_8n_inner:
subs r8,r8,#8
vmlal.u32 q6,d28,d0[0]
vld1.64 {q13},[r6,:128]
vmlal.u32 q7,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0]
vmlal.u32 q8,d28,d1[0]
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q9,d28,d1[1]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmlal.u32 q11,d29,d6[1]
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vst1.64 {q6},[r7,:128]!
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]
vmlal.u32 q8,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1]
vmlal.u32 q9,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d1[1]
vmlal.u32 q11,d28,d2[0]
vmlal.u32 q12,d28,d2[1]
vmlal.u32 q13,d28,d3[0]
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2]
vmlal.u32 q7,d29,d4[0]
vmlal.u32 q8,d29,d4[1]
vmlal.u32 q9,d29,d5[0]
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vmlal.u32 q12,d29,d6[1]
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vst1.64 {q7},[r7,:128]!
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]
vmlal.u32 q9,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2]
vmlal.u32 q10,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q11,d28,d1[1]
vmlal.u32 q12,d28,d2[0]
vmlal.u32 q13,d28,d2[1]
vmlal.u32 q6,d28,d3[0]
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3]
vmlal.u32 q8,d29,d4[0]
vmlal.u32 q9,d29,d4[1]
vmlal.u32 q10,d29,d5[0]
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vmlal.u32 q13,d29,d6[1]
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vst1.64 {q8},[r7,:128]!
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]
vmlal.u32 q10,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3]
vmlal.u32 q11,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q12,d28,d1[1]
vmlal.u32 q13,d28,d2[0]
vmlal.u32 q6,d28,d2[1]
vmlal.u32 q7,d28,d3[0]
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4]
vmlal.u32 q9,d29,d4[0]
vmlal.u32 q10,d29,d4[1]
vmlal.u32 q11,d29,d5[0]
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vmlal.u32 q6,d29,d6[1]
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vst1.64 {q9},[r7,:128]!
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]
vmlal.u32 q11,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4]
vmlal.u32 q12,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q13,d28,d1[1]
vmlal.u32 q6,d28,d2[0]
vmlal.u32 q7,d28,d2[1]
vmlal.u32 q8,d28,d3[0]
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5]
vmlal.u32 q10,d29,d4[0]
vmlal.u32 q11,d29,d4[1]
vmlal.u32 q12,d29,d5[0]
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vmlal.u32 q7,d29,d6[1]
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vst1.64 {q10},[r7,:128]!
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]
vmlal.u32 q12,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5]
vmlal.u32 q13,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q6,d28,d1[1]
vmlal.u32 q7,d28,d2[0]
vmlal.u32 q8,d28,d2[1]
vmlal.u32 q9,d28,d3[0]
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6]
vmlal.u32 q11,d29,d4[0]
vmlal.u32 q12,d29,d4[1]
vmlal.u32 q13,d29,d5[0]
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vmlal.u32 q8,d29,d6[1]
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vst1.64 {q11},[r7,:128]!
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]
vmlal.u32 q13,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6]
vmlal.u32 q6,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q7,d28,d1[1]
vmlal.u32 q8,d28,d2[0]
vmlal.u32 q9,d28,d2[1]
vmlal.u32 q10,d28,d3[0]
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7]
vmlal.u32 q12,d29,d4[0]
vmlal.u32 q13,d29,d4[1]
vmlal.u32 q6,d29,d5[0]
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vmlal.u32 q9,d29,d6[1]
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vst1.64 {q12},[r7,:128]!
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]
vmlal.u32 q6,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7]
vmlal.u32 q7,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q8,d28,d1[1]
vmlal.u32 q9,d28,d2[0]
vmlal.u32 q10,d28,d2[1]
vmlal.u32 q11,d28,d3[0]
vmlal.u32 q12,d28,d3[1]
it eq
subeq r1,r1,r5,lsl#2 @ rewind
vmlal.u32 q13,d29,d4[0]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q6,d29,d4[1]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q7,d29,d5[0]
add r10,sp,#8 @ rewind
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vmlal.u32 q10,d29,d6[1]
vmlal.u32 q11,d29,d7[0]
vst1.64 {q13},[r7,:128]!
vmlal.u32 q12,d29,d7[1]
bne LNEON_8n_inner
add r6,sp,#128
vst1.64 {q6,q7},[r7,:256]!
veor q2,q2,q2 @ d4-d5
vst1.64 {q8,q9},[r7,:256]!
veor q3,q3,q3 @ d6-d7
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12},[r7,:128]
subs r9,r9,#8
vld1.64 {q6,q7},[r6,:256]!
vld1.64 {q8,q9},[r6,:256]!
vld1.64 {q10,q11},[r6,:256]!
vld1.64 {q12,q13},[r6,:256]!
itt ne
subne r3,r3,r5,lsl#2 @ rewind
bne LNEON_8n_outer
add r7,sp,#128
vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame
vshr.u64 d10,d12,#16
vst1.64 {q2,q3},[sp,:256]!
vadd.u64 d13,d13,d10
vst1.64 {q2,q3}, [sp,:256]!
vshr.u64 d10,d13,#16
vst1.64 {q2,q3}, [sp,:256]!
vzip.16 d12,d13
mov r8,r5
b LNEON_tail_entry
.align 4
LNEON_tail:
vadd.u64 d12,d12,d10
vshr.u64 d10,d12,#16
vld1.64 {q8,q9}, [r6, :256]!
vadd.u64 d13,d13,d10
vld1.64 {q10,q11}, [r6, :256]!
vshr.u64 d10,d13,#16
vld1.64 {q12,q13}, [r6, :256]!
vzip.16 d12,d13
LNEON_tail_entry:
vadd.u64 d14,d14,d10
vst1.32 {d12[0]}, [r7, :32]!
vshr.u64 d10,d14,#16
vadd.u64 d15,d15,d10
vshr.u64 d10,d15,#16
vzip.16 d14,d15
vadd.u64 d16,d16,d10
vst1.32 {d14[0]}, [r7, :32]!
vshr.u64 d10,d16,#16
vadd.u64 d17,d17,d10
vshr.u64 d10,d17,#16
vzip.16 d16,d17
vadd.u64 d18,d18,d10
vst1.32 {d16[0]}, [r7, :32]!
vshr.u64 d10,d18,#16
vadd.u64 d19,d19,d10
vshr.u64 d10,d19,#16
vzip.16 d18,d19
vadd.u64 d20,d20,d10
vst1.32 {d18[0]}, [r7, :32]!
vshr.u64 d10,d20,#16
vadd.u64 d21,d21,d10
vshr.u64 d10,d21,#16
vzip.16 d20,d21
vadd.u64 d22,d22,d10
vst1.32 {d20[0]}, [r7, :32]!
vshr.u64 d10,d22,#16
vadd.u64 d23,d23,d10
vshr.u64 d10,d23,#16
vzip.16 d22,d23
vadd.u64 d24,d24,d10
vst1.32 {d22[0]}, [r7, :32]!
vshr.u64 d10,d24,#16
vadd.u64 d25,d25,d10
vshr.u64 d10,d25,#16
vzip.16 d24,d25
vadd.u64 d26,d26,d10
vst1.32 {d24[0]}, [r7, :32]!
vshr.u64 d10,d26,#16
vadd.u64 d27,d27,d10
vshr.u64 d10,d27,#16
vzip.16 d26,d27
vld1.64 {q6,q7}, [r6, :256]!
subs r8,r8,#8
vst1.32 {d26[0]}, [r7, :32]!
bne LNEON_tail
vst1.32 {d10[0]}, [r7, :32] @ top-most bit
sub r3,r3,r5,lsl#2 @ rewind r3
subs r1,sp,#0 @ clear carry flag
add r2,sp,r5,lsl#2
LNEON_sub:
ldmia r1!, {r4,r5,r6,r7}
ldmia r3!, {r8,r9,r10,r11}
sbcs r8, r4,r8
sbcs r9, r5,r9
sbcs r10,r6,r10
sbcs r11,r7,r11
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne LNEON_sub
ldr r10, [r1] @ load top-most bit
mov r11,sp
veor q0,q0,q0
sub r11,r2,r11 @ this is num*4
veor q1,q1,q1
mov r1,sp
sub r0,r0,r11 @ rewind r0
mov r3,r2 @ second 3/4th of frame
sbcs r10,r10,#0 @ result is carry flag
LNEON_copy_n_zap:
ldmia r1!, {r4,r5,r6,r7}
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r3,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
ldmia r1, {r4,r5,r6,r7}
stmia r0!, {r8,r9,r10,r11}
sub r1,r1,#16
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r1,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne LNEON_copy_n_zap
mov sp,ip
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
bx lr @ bx lr
#endif
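@ The NEON path above "smashes" each 32-bit word of b (and of m) into two
@ 16-bit halves via vzip.16 so that every vmlal.u32 product fits in 48 bits,
@ leaving 16 bits of headroom to accumulate terms in a 64-bit lane. A quick
@ Python check of that headroom:
@
@   b = 0xDEADBEEF
@   b_lo, b_hi = b & 0xFFFF, b >> 16
@   assert b_lo | (b_hi << 16) == b
@   assert 0xFFFF * 0xFFFFFFFF < 1 << 48   # 2^16 such terms still fit in 64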
.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/sha256-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Permission to use under GPL terms is granted.
@ ====================================================================
@ SHA256 block procedure for ARMv4. May 2007.
@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
@ byte [on single-issue Xscale PXA250 core].
@ July 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
@ Cortex A8 core and ~20 cycles per processed byte.
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 16%
@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
@ September 2013.
@
@ Add NEON implementation. On Cortex A8 it was measured to process one
@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
@ code (meaning that latter performs sub-optimally, nothing was done
@ about it).
@ May 2014.
@
@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
#ifndef __KERNEL__
# include <openssl/arm_arch.h>
#else
# define __ARM_ARCH __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those
@ instructions are manually-encoded. (See unsha256.)
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.align 5
K256:
.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.word 0 @ terminator
.align 5
.globl _sha256_block_data_order_nohw
.private_extern _sha256_block_data_order_nohw
#ifdef __thumb2__
.thumb_func _sha256_block_data_order_nohw
#endif
_sha256_block_data_order_nohw:
add r2,r1,r2,lsl#6 @ len to point at the end of inp
stmdb sp!,{r0,r1,r2,r4-r11,lr}
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
adr r14,K256
sub sp,sp,#16*4 @ alloca(X[16])
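@ Stack frame from here on: sp+#0..#15*4 hold the message schedule X[0..15];
@ sp+#16*4, #17*4 and #18*4 hold the ctx, inp and inp+len values pushed above
@ (see "pull ctx"/"pull inp" at the end of the loop). The rounds use staged
@ rotations: Sigma1(e)=ror6^ror11^ror25 is computed as ror#6 of
@ (e ^ e ror#5 ^ e ror#19), and Sigma0(a)=ror2^ror13^ror22 as ror#2 of
@ (a ^ a ror#11 ^ a ror#20).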
Loop:
# if __ARM_ARCH>=7
ldr r2,[r1],#4
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ magic
eor r12,r12,r12
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 0
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 0
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 0==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 0<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 1
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 1
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 1==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 1<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 2
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 2
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 2==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 2<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 3
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 3
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 3==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 3<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 4
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 4
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 4==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 4<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 5
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 5==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 5<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 6
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 6
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 6==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 6<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 7
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 7==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 7<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 8
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 8
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 8==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 8<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 9
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 9
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 9==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 9<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 10
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 10
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 10==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 10<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 11
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 11
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 11==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 11<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 12
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 12
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 12==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 12<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 13
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 13
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 13==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 13<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 14
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 14
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 14==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 14<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
@ ldr r2,[r1],#4 @ 15
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 15
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 15==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 15<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
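@ Rounds 16..63: in addition to the round function above, each step extends
@ the message schedule as X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] +
@ sigma1(X[i-2]), where sigma0(x) = ror(x,7)^ror(x,18)^(x>>3) and
@ sigma1(x) = ror(x,17)^ror(x,19)^(x>>10); the ror#7/ror#18/lsr#3 and
@ ror#17/ror#19/lsr#10 sequences below compute exactly these terms.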
Lrounds_16_xx:
@ ldr r2,[sp,#1*4] @ 16
@ ldr r1,[sp,#14*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#0*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#9*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 16==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 16<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#2*4] @ 17
@ ldr r1,[sp,#15*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#1*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#10*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 17==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 17<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#3*4] @ 18
@ ldr r1,[sp,#0*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#2*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#11*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 18==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 18<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#4*4] @ 19
@ ldr r1,[sp,#1*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#3*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#12*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 19==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 19<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#5*4] @ 20
@ ldr r1,[sp,#2*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#4*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#13*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 20==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 20<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#6*4] @ 21
@ ldr r1,[sp,#3*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#5*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#14*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 21==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 21<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#7*4] @ 22
@ ldr r1,[sp,#4*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#6*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#15*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 22==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 22<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#8*4] @ 23
@ ldr r1,[sp,#5*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#7*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#0*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 23==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 23<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#9*4] @ 24
@ ldr r1,[sp,#6*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#8*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#1*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 24==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 24<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#10*4] @ 25
@ ldr r1,[sp,#7*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#9*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#2*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 25==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 25<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#11*4] @ 26
@ ldr r1,[sp,#8*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#10*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#3*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 26==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 26<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#12*4] @ 27
@ ldr r1,[sp,#9*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#11*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#4*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 27==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 27<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#13*4] @ 28
@ ldr r1,[sp,#10*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#12*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#5*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 28==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 28<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#14*4] @ 29
@ ldr r1,[sp,#11*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#13*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#6*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 29==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 29<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#15*4] @ 30
@ ldr r1,[sp,#12*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#14*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#7*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 30==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 30<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#0*4] @ 31
@ ldr r1,[sp,#13*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#15*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#8*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 31==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 31<15
# if __ARM_ARCH>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH>=7
ite eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r3,[sp,#16*4] @ pull ctx
bne Lrounds_16_xx
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r0,[r3,#0]
ldr r2,[r3,#4]
ldr r12,[r3,#8]
add r4,r4,r0
ldr r0,[r3,#12]
add r5,r5,r2
ldr r2,[r3,#16]
add r6,r6,r12
ldr r12,[r3,#20]
add r7,r7,r0
ldr r0,[r3,#24]
add r8,r8,r2
ldr r2,[r3,#28]
add r9,r9,r12
ldr r1,[sp,#17*4] @ pull inp
ldr r12,[sp,#18*4] @ pull inp+len
add r10,r10,r0
add r11,r11,r2
stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
cmp r1,r12
sub r14,r14,#256 @ rewind Ktbl
bne Loop
add sp,sp,#19*4 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
#if __ARM_MAX_ARCH__>=7
LK256_shortcut_neon:
@ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode.
#if defined(__thumb2__)
.word K256-(LK256_add_neon+4)
#else
.word K256-(LK256_add_neon+8)
#endif
.globl _sha256_block_data_order_neon
.private_extern _sha256_block_data_order_neon
#ifdef __thumb2__
.thumb_func _sha256_block_data_order_neon
#endif
.align 5
.skip 16
_sha256_block_data_order_neon:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r11,sp,#16*4+16
@ K256 is just at the boundary of being easily referenced by an ADR from
@ this function. In Arm mode, when building with __ARM_ARCH=6, it does
@ not fit. By moving code around, we could make it fit, but this is too
@ fragile. For simplicity, just load the offset from
@ .LK256_shortcut_neon.
@
@ TODO(davidben): adrl would avoid a load, but clang-assembler does not
@ support it. We might be able to emulate it with a macro, but Android's
@ did not work when I tried it.
@ https://android.googlesource.com/platform/ndk/+/refs/heads/master/docs/ClangMigration.md#arm
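@ Worked example: in Arm mode the literal at LK256_shortcut_neon holds
@ K256-(LK256_add_neon+8); when "add r14,pc,r14" executes at LK256_add_neon,
@ pc reads as LK256_add_neon+8, so r14 becomes
@ (LK256_add_neon+8) + (K256-(LK256_add_neon+8)) = K256. Thumb mode uses a
@ +4 bias for the same reason.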
ldr r14,LK256_shortcut_neon
LK256_add_neon:
add r14,pc,r14
bic r11,r11,#15 @ align for 128-bit stores
mov r12,sp
mov sp,r11 @ alloca
add r2,r1,r2,lsl#6 @ len to point at the end of inp
vld1.8 {q0},[r1]!
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
vld1.32 {q8},[r14,:128]!
vld1.32 {q9},[r14,:128]!
vld1.32 {q10},[r14,:128]!
vld1.32 {q11},[r14,:128]!
vrev32.8 q0,q0 @ yes, even on
str r0,[sp,#64]
vrev32.8 q1,q1 @ big-endian
str r1,[sp,#68]
mov r1,sp
vrev32.8 q2,q2
str r2,[sp,#72]
vrev32.8 q3,q3
str r12,[sp,#76] @ save original sp
vadd.i32 q8,q8,q0
vadd.i32 q9,q9,q1
vst1.32 {q8},[r1,:128]!
vadd.i32 q10,q10,q2
vst1.32 {q9},[r1,:128]!
vadd.i32 q11,q11,q3
vst1.32 {q10},[r1,:128]!
vst1.32 {q11},[r1,:128]!
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
sub r1,r1,#64
ldr r2,[sp,#0]
eor r12,r12,r12
eor r3,r5,r6
b L_00_48
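@ In L_00_48 below the NEON lanes compute the message-schedule sigma
@ functions; 32-bit rotations are emulated with vshr.u32/vsli.32 pairs
@ (e.g. vshr #7 plus vsli #25 yields ror #7, vshr #18 plus vsli #14 yields
@ ror #18), interleaved with the scalar round computation.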
.align 4
L_00_48:
vext.8 q8,q0,q1,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q2,q3,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q0,q0,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#4]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d7,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d7,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d7,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q0,q0,q9
add r10,r10,r2
ldr r2,[sp,#8]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d7,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d7,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d0,d0,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d0,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d0,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d0,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#12]
and r3,r3,r12
vshr.u32 d24,d0,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d0,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d1,d1,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q0
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q1,q2,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q3,q0,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q1,q1,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#20]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d1,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d1,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d1,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q1,q1,q9
add r6,r6,r2
ldr r2,[sp,#24]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d1,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d1,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d2,d2,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d2,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d2,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d2,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#28]
and r3,r3,r12
vshr.u32 d24,d2,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d2,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d3,d3,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q1
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
vext.8 q8,q2,q3,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q0,q1,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q2,q2,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#36]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d3,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d3,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d3,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q2,q2,q9
add r10,r10,r2
ldr r2,[sp,#40]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d3,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d3,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d4,d4,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d4,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d4,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d4,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#44]
and r3,r3,r12
vshr.u32 d24,d4,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d4,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d5,d5,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q2
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q3,q0,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q1,q2,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q3,q3,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#52]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d5,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d5,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d5,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q3,q3,q9
add r6,r6,r2
ldr r2,[sp,#56]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d5,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d5,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d6,d6,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d6,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d6,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d6,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#60]
and r3,r3,r12
vshr.u32 d24,d6,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d6,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d7,d7,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q3
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[r14]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
teq r2,#0 @ check for K256 terminator
ldr r2,[sp,#0]
sub r1,r1,#64
bne L_00_48
ldr r1,[sp,#68]
ldr r0,[sp,#72]
sub r14,r14,#256 @ rewind r14
teq r1,r0
it eq
subeq r1,r1,#64 @ avoid SEGV
vld1.8 {q0},[r1]! @ load next input block
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
it ne
strne r1,[sp,#68]
mov r1,sp
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q0,q0
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q0
ldr r2,[sp,#4]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#8]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#12]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q1,q1
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q1
ldr r2,[sp,#20]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#24]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#28]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q2,q2
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q2
ldr r2,[sp,#36]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#40]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#44]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q3,q3
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q3
ldr r2,[sp,#52]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#56]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#60]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#64]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
ldr r0,[r2,#0]
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r12,[r2,#4]
ldr r3,[r2,#8]
ldr r1,[r2,#12]
add r4,r4,r0 @ accumulate
ldr r0,[r2,#16]
add r5,r5,r12
ldr r12,[r2,#20]
add r6,r6,r3
ldr r3,[r2,#24]
add r7,r7,r1
ldr r1,[r2,#28]
add r8,r8,r0
str r4,[r2],#4
add r9,r9,r12
str r5,[r2],#4
add r10,r10,r3
str r6,[r2],#4
add r11,r11,r1
str r7,[r2],#4
stmia r2,{r8,r9,r10,r11}
ittte ne
movne r1,sp
ldrne r2,[sp,#0]
eorne r12,r12,r12
ldreq sp,[sp,#76] @ restore original sp
itt ne
eorne r3,r5,r6
bne L_00_48
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
# if defined(__thumb2__)
# define INST(a,b,c,d) .byte c,d|0xc,a,b
# else
# define INST(a,b,c,d) .byte a,b,c,d
# endif
LK256_shortcut_hw:
@ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode.
#if defined(__thumb2__)
.word K256-(LK256_add_hw+4)
#else
.word K256-(LK256_add_hw+8)
#endif
.globl _sha256_block_data_order_hw
.private_extern _sha256_block_data_order_hw
#ifdef __thumb2__
.thumb_func _sha256_block_data_order_hw
#endif
.align 5
_sha256_block_data_order_hw:
@ K256 is too far to reference from one ADR instruction in Thumb mode. In
@ Arm mode, we could make it fit by aligning the ADR offset to a 64-byte
@ boundary. For simplicity, just load the offset from LK256_shortcut_hw.
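@ The literal at LK256_shortcut_hw holds K256 minus the value that the ADD
@ below reads from PC (PC is 8 bytes ahead of LK256_add_hw in Arm mode and
@ 4 bytes ahead in Thumb mode), so "add r3,pc,r3" reconstructs the absolute
@ address of K256 regardless of where the code is loaded.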
ldr r3,LK256_shortcut_hw
LK256_add_hw:
add r3,pc,r3
vld1.32 {q0,q1},[r0]
add r2,r1,r2,lsl#6 @ len to point at the end of inp
b Loop_v8
.align 4
Loop_v8:
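@ Each group below follows the same pattern: load the next four K256 words
@ into q12 or q13, add them to the corresponding message words (q8-q11),
@ extend the message schedule with sha256su0/sha256su1, and advance the
@ hash state held in q0/q1 with sha256h/sha256h2, using q2 as a scratch
@ copy of the state. q14/q15 keep the state from the start of the block
@ for the final accumulation.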
vld1.8 {q8,q9},[r1]!
vld1.8 {q10,q11},[r1]!
vld1.32 {q12},[r3]!
vrev32.8 q8,q8
vrev32.8 q9,q9
vrev32.8 q10,q10
vrev32.8 q11,q11
vmov q14,q0 @ offload
vmov q15,q1
teq r1,r2
vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q10
INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q11
INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q10
INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q11
INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q10
INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q11
INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
vld1.32 {q13},[r3]
vadd.i32 q12,q12,q10
sub r3,r3,#256-16 @ rewind
vmov q2,q0
INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
vadd.i32 q13,q13,q11
vmov q2,q0
INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
vadd.i32 q0,q0,q14
vadd.i32 q1,q1,q15
it ne
bne Loop_v8
vst1.32 {q0,q1},[r0]
bx lr @ bx lr
#endif
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
wlsfx/bnbb | 42,677 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/sha512-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Permission to use under GPL terms is granted.
@ ====================================================================
@ SHA512 block procedure for ARMv4. September 2007.
@ This code is ~4.5 (four and a half) times faster than code generated
@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
@ Xscale PXA250 core].
@
@ July 2010.
@
@ Rescheduling for the dual-issue pipeline resulted in a 6% improvement on
@ the Cortex A8 core and ~40 cycles per processed byte.
@
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in a 7%
@ improvement on the Cortex A8 core and ~38 cycles per byte.
@
@ March 2011.
@
@ Add NEON implementation. On Cortex A8 it was measured to process
@ one byte in 23.3 cycles, or ~60% faster than the integer-only code.
@
@ August 2012.
@
@ Improve NEON performance by 12% on Snapdragon S4. In absolute
@ terms it's 22.6 cycles per byte, which is a disappointing result.
@ Technical writers asserted that the 3-way S4 pipeline can sustain
@ multiple NEON instructions per cycle, but dual NEON issue could
@ not be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
@ for further details. On a side note, Cortex-A15 processes one byte in
@ 16 cycles.
@
@ Byte order [in]dependence. =========================================
@
@ Originally the caller was expected to maintain a specific *dword* order in
@ h[0-7], namely with the most significant dword at the *lower* address, which
@ was reflected in the two parameters below as 0 and 4. Now the caller is
@ expected to maintain native byte order for whole 64-bit values.
#ifndef __KERNEL__
# include <openssl/arm_arch.h>
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
#endif
.text
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code 32
#endif
.align 5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.globl _sha512_block_data_order_nohw
.private_extern _sha512_block_data_order_nohw
#ifdef __thumb2__
.thumb_func _sha512_block_data_order_nohw
#endif
_sha512_block_data_order_nohw:
add r2,r1,r2,lsl#7 @ len to point at the end of inp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
adr r14,K512
sub sp,sp,#9*8
ldr r7,[r0,#32+LO]
ldr r8,[r0,#32+HI]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
Loop:
str r9, [sp,#48+0]
str r10, [sp,#48+4]
str r11, [sp,#56+0]
str r12, [sp,#56+4]
ldr r5,[r0,#0+LO]
ldr r6,[r0,#0+HI]
ldr r3,[r0,#8+LO]
ldr r4,[r0,#8+HI]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
str r3,[sp,#8+0]
str r4,[sp,#8+4]
str r9, [sp,#16+0]
str r10, [sp,#16+4]
str r11, [sp,#24+0]
str r12, [sp,#24+4]
ldr r3,[r0,#40+LO]
ldr r4,[r0,#40+HI]
str r3,[sp,#40+0]
str r4,[sp,#40+4]
L00_15:
#if __ARM_ARCH<7
ldrb r3,[r1,#7]
ldrb r9, [r1,#6]
ldrb r10, [r1,#5]
ldrb r11, [r1,#4]
ldrb r4,[r1,#3]
ldrb r12, [r1,#2]
orr r3,r3,r9,lsl#8
ldrb r9, [r1,#1]
orr r3,r3,r10,lsl#16
ldrb r10, [r1],#8
orr r3,r3,r11,lsl#24
orr r4,r4,r12,lsl#8
orr r4,r4,r9,lsl#16
orr r4,r4,r10,lsl#24
#else
ldr r3,[r1,#4]
ldr r4,[r1],#8
#ifdef __ARMEL__
rev r3,r3
rev r4,r4
#endif
#endif
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
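@ With e held as the 32-bit pair (lo,hi), a 64-bit ROTR by n (n<32) gives
@ lo' = lo>>n ^ hi<<(32-n) and hi' = hi>>n ^ lo<<(32-n); for n>=32 the two
@ halves are swapped first, e.g. ROTR 41 = ROTR 9 applied to (hi,lo), which
@ is where the hi>>9^lo<<23 term above comes from. The three rotations are
@ combined half by half with the eor chain below.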
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
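@ The low byte of the current K value doubles as the round counter: 0x94
@ (148) is the low byte of K[15] and marks the end of these first 16
@ rounds, so bit 0 of the K-table pointer in r14 is set here as a flag and
@ tested at the bottom of the loop before falling through to L16_79.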
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
tst r14,#1
beq L00_15
ldr r9,[sp,#184+0]
ldr r10,[sp,#184+4]
bic r14,r14,#1
L16_79:
@ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
@ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
@ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
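@ Unlike the Sigma rotations, the last term of sigma0 is a plain shift
@ (x>>7), so the low half picks up the bits shifted down from the high
@ word (hi<<25) while the high half is just hi>>7 with nothing wrapping in
@ from below, which is why the HI line above has one term fewer.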
mov r3,r9,lsr#1
ldr r11,[sp,#80+0]
mov r4,r10,lsr#1
ldr r12,[sp,#80+4]
eor r3,r3,r10,lsl#31
eor r4,r4,r9,lsl#31
eor r3,r3,r9,lsr#8
eor r4,r4,r10,lsr#8
eor r3,r3,r10,lsl#24
eor r4,r4,r9,lsl#24
eor r3,r3,r9,lsr#7
eor r4,r4,r10,lsr#7
eor r3,r3,r10,lsl#25
@ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
@ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
@ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
mov r9,r11,lsr#19
mov r10,r12,lsr#19
eor r9,r9,r12,lsl#13
eor r10,r10,r11,lsl#13
eor r9,r9,r12,lsr#29
eor r10,r10,r11,lsr#29
eor r9,r9,r11,lsl#3
eor r10,r10,r12,lsl#3
eor r9,r9,r11,lsr#6
eor r10,r10,r12,lsr#6
ldr r11,[sp,#120+0]
eor r9,r9,r12,lsl#26
ldr r12,[sp,#120+4]
adds r3,r3,r9
ldr r9,[sp,#192+0]
adc r4,r4,r10
ldr r10,[sp,#192+4]
adds r3,r3,r11
adc r4,r4,r12
adds r3,r3,r9
adc r4,r4,r10
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
#if __ARM_ARCH>=7
ittt eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq L16_79
bic r14,r14,#1
ldr r3,[sp,#8+0]
ldr r4,[sp,#8+4]
ldr r9, [r0,#0+LO]
ldr r10, [r0,#0+HI]
ldr r11, [r0,#8+LO]
ldr r12, [r0,#8+HI]
adds r9,r5,r9
str r9, [r0,#0+LO]
adc r10,r6,r10
str r10, [r0,#0+HI]
adds r11,r3,r11
str r11, [r0,#8+LO]
adc r12,r4,r12
str r12, [r0,#8+HI]
ldr r5,[sp,#16+0]
ldr r6,[sp,#16+4]
ldr r3,[sp,#24+0]
ldr r4,[sp,#24+4]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
adds r9,r5,r9
str r9, [r0,#16+LO]
adc r10,r6,r10
str r10, [r0,#16+HI]
adds r11,r3,r11
str r11, [r0,#24+LO]
adc r12,r4,r12
str r12, [r0,#24+HI]
ldr r3,[sp,#40+0]
ldr r4,[sp,#40+4]
ldr r9, [r0,#32+LO]
ldr r10, [r0,#32+HI]
ldr r11, [r0,#40+LO]
ldr r12, [r0,#40+HI]
adds r7,r7,r9
str r7,[r0,#32+LO]
adc r8,r8,r10
str r8,[r0,#32+HI]
adds r11,r3,r11
str r11, [r0,#40+LO]
adc r12,r4,r12
str r12, [r0,#40+HI]
ldr r5,[sp,#48+0]
ldr r6,[sp,#48+4]
ldr r3,[sp,#56+0]
ldr r4,[sp,#56+4]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
adds r9,r5,r9
str r9, [r0,#48+LO]
adc r10,r6,r10
str r10, [r0,#48+HI]
adds r11,r3,r11
str r11, [r0,#56+LO]
adc r12,r4,r12
str r12, [r0,#56+HI]
add sp,sp,#640
sub r14,r14,#640
teq r1,r2
bne Loop
add sp,sp,#8*9 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
#if __ARM_MAX_ARCH__>=7
.globl _sha512_block_data_order_neon
.private_extern _sha512_block_data_order_neon
#ifdef __thumb2__
.thumb_func _sha512_block_data_order_neon
#endif
.align 4
_sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add r2,r1,r2,lsl#7 @ len to point at the end of inp
adr r3,K512
VFP_ABI_PUSH
vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
vld1.64 {d0},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 0>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 0<16 && defined(__ARMEL__)
vrev64.8 d0,d0
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 1
#if 1<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 1>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 1<16 && defined(__ARMEL__)
vrev64.8 d1,d1
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 2
#if 2<16
vld1.64 {d2},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 2>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 2<16 && defined(__ARMEL__)
vrev64.8 d2,d2
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 3
#if 3<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 3>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 3<16 && defined(__ARMEL__)
vrev64.8 d3,d3
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 4
#if 4<16
vld1.64 {d4},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 4>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 4<16 && defined(__ARMEL__)
vrev64.8 d4,d4
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 5
#if 5<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 5>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 5<16 && defined(__ARMEL__)
vrev64.8 d5,d5
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 6
#if 6<16
vld1.64 {d6},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 6>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 6<16 && defined(__ARMEL__)
vrev64.8 d6,d6
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 7
#if 7<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 7>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 7<16 && defined(__ARMEL__)
vrev64.8 d7,d7
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 d24,d20,#14 @ 8
#if 8<16
vld1.64 {d8},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 8>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 8<16 && defined(__ARMEL__)
vrev64.8 d8,d8
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 9
#if 9<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 9>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 9<16 && defined(__ARMEL__)
vrev64.8 d9,d9
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 10
#if 10<16
vld1.64 {d10},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 10>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 10<16 && defined(__ARMEL__)
vrev64.8 d10,d10
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 11
#if 11<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 11>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 11<16 && defined(__ARMEL__)
vrev64.8 d11,d11
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 12
#if 12<16
vld1.64 {d12},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 12>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 12<16 && defined(__ARMEL__)
vrev64.8 d12,d12
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 13
#if 13<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 13>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 13<16 && defined(__ARMEL__)
vrev64.8 d13,d13
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 14
#if 14<16
vld1.64 {d14},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 14>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 14<16 && defined(__ARMEL__)
vrev64.8 d14,d14
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 15
#if 15<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 15>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 15<16 && defined(__ARMEL__)
vrev64.8 d15,d15
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
mov r12,#4
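@ Rounds 16-79: r12 counts four passes of the loop below, each covering 16
@ rounds. q0-q7 hold the sixteen message words X[0..15] as eight 64-bit
@ pairs; every pass updates them in place with the sigma0/sigma1 recurrence
@ (two words at a time in the q-register arithmetic) while the interleaved
@ d-register code performs the round function itself.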
L16_79_neon:
subs r12,#1
vshr.u64 q12,q7,#19
vshr.u64 q13,q7,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q7,#6
vsli.64 q12,q7,#45
vext.8 q14,q0,q1,#8 @ X[i+1]
vsli.64 q13,q7,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q0,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q4,q5,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q0,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q0,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 16<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 17
#if 17<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 17>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 17<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q0,#19
vshr.u64 q13,q0,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q0,#6
vsli.64 q12,q0,#45
vext.8 q14,q1,q2,#8 @ X[i+1]
vsli.64 q13,q0,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q1,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q5,q6,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q1,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q1,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 18<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 19
#if 19<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 19>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 19<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q1,#19
vshr.u64 q13,q1,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q1,#6
vsli.64 q12,q1,#45
vext.8 q14,q2,q3,#8 @ X[i+1]
vsli.64 q13,q1,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q2,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q6,q7,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q2,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q2,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 20<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 21
#if 21<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 21>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 21<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q2,#19
vshr.u64 q13,q2,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q2,#6
vsli.64 q12,q2,#45
vext.8 q14,q3,q4,#8 @ X[i+1]
vsli.64 q13,q2,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q3,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q7,q0,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q3,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q3,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 22<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 23
#if 23<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 23>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 23<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 q12,q3,#19
vshr.u64 q13,q3,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q3,#6
vsli.64 q12,q3,#45
vext.8 q14,q4,q5,#8 @ X[i+1]
vsli.64 q13,q3,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q4,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q0,q1,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q4,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q4,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 24<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 25
#if 25<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 25>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 25<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q4,#19
vshr.u64 q13,q4,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q4,#6
vsli.64 q12,q4,#45
vext.8 q14,q5,q6,#8 @ X[i+1]
vsli.64 q13,q4,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q5,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q1,q2,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q5,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q5,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 26<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 27
#if 27<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 27>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 27<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q5,#19
vshr.u64 q13,q5,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q5,#6
vsli.64 q12,q5,#45
vext.8 q14,q6,q7,#8 @ X[i+1]
vsli.64 q13,q5,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q6,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q2,q3,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q6,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q6,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 28<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 29
#if 29<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 29>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 29<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q6,#19
vshr.u64 q13,q6,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q6,#6
vsli.64 q12,q6,#45
vext.8 q14,q7,q0,#8 @ X[i+1]
vsli.64 q13,q6,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q7,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q3,q4,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q7,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q7,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 30<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 31
#if 31<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 31>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 31<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
bne L16_79_neon
vadd.i64 d16,d30 @ h+=Maj from the past
vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
teq r1,r2
sub r3,#640 @ rewind K512
bne Loop_neon
VFP_ABI_POP
bx lr @ .word 0xe12fff1e
#endif
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
wlsfx/bnbb | 6,942 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/ghashv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.code 32
#undef __thumb2__
.globl _gcm_init_v8
.private_extern _gcm_init_v8
#ifdef __thumb2__
.thumb_func _gcm_init_v8
#endif
.align 4
_gcm_init_v8:
AARCH64_VALID_CALL_TARGET
vld1.64 {q9},[r1] @ load input H
vmov.i8 q11,#0xe1
vshl.i64 q11,q11,#57 @ 0xc2.0
vext.8 q3,q9,q9,#8
vshr.u64 q10,q11,#63
vdup.32 q9,d18[1]
vext.8 q8,q10,q11,#8 @ t0=0xc2....01
vshr.u64 q10,q3,#63
vshr.s32 q9,q9,#31 @ broadcast carry bit
vand q10,q10,q8
vshl.i64 q3,q3,#1
vext.8 q10,q10,q10,#8
vand q8,q8,q9
vorr q3,q3,q10 @ H<<<=1
veor q12,q3,q8 @ twisted H
vext.8 q12, q12, q12, #8
vst1.64 {q12},[r0]! @ store Htable[0]
@ calculate H^2
vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing
.byte 0xa9,0x0e,0xa9,0xf2 @ pmull2 q0,q12,q12
veor q8,q8,q12
.byte 0xa8,0x4e,0xa8,0xf2 @ pmull q2,q12,q12
.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q9,q0,q10
vext.8 q14,q9,q9,#8 @ Karatsuba pre-processing
veor q9,q9,q14
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
vst1.64 {q13},[r0]! @ store Htable[1..2]
vst1.64 {q14},[r0]! @ store Htable[1..2]
bx lr
.globl _gcm_gmult_v8
.private_extern _gcm_gmult_v8
#ifdef __thumb2__
.thumb_func _gcm_gmult_v8
#endif
.align 4
_gcm_gmult_v8:
AARCH64_VALID_CALL_TARGET
vld1.64 {q9},[r0] @ load Xi
vmov.i8 q11,#0xe1
vld1.64 {q12,q13},[r1] @ load twisted H, ...
vext.8 q12,q12,q12,#8
vshl.u64 q11,q11,#57
#ifndef __ARMEB__
vrev64.8 q9,q9
#endif
vext.8 q3,q9,q9,#8
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
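@ Karatsuba over GF(2): the middle 128-bit term H.lo*Xi.hi ^ H.hi*Xi.lo
@ equals (H.lo^H.hi)*(Xi.lo^Xi.hi) ^ H.lo*Xi.lo ^ H.hi*Xi.hi, so the single
@ extra pmull above (q13 carries H.lo^H.hi precomputed by gcm_init_v8)
@ replaces two multiplies; the post-processing eors below recover the
@ middle term before reduction.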
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q0,q0,q10
#ifndef __ARMEB__
vrev64.8 q0,q0
#endif
vext.8 q0,q0,q0,#8
vst1.64 {q0},[r0] @ write out Xi
bx lr
.globl _gcm_ghash_v8
.private_extern _gcm_ghash_v8
#ifdef __thumb2__
.thumb_func _gcm_ghash_v8
#endif
.align 4
_gcm_ghash_v8:
AARCH64_VALID_CALL_TARGET
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
vld1.64 {q0},[r0] @ load [rotated] Xi
@ "[rotated]" means that
@ loaded value would have
@ to be rotated in order to
@ make it appear as in
@ algorithm specification
subs r3,r3,#32 @ see if r3 is 32 or larger
mov r12,#16 @ r12 is used as post-
@ increment for input pointer;
@ as loop is modulo-scheduled
@ r12 is zeroed just in time
@ to preclude overstepping
@ inp[len], which means that
@ last block[s] are actually
@ loaded twice, but last
@ copy is not processed
vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2
vext.8 q12,q12,q12,#8
vmov.i8 q11,#0xe1
vld1.64 {q14},[r1]
vext.8 q14,q14,q14,#8
moveq r12,#0 @ is it time to zero r12?
vext.8 q0,q0,q0,#8 @ rotate Xi
vld1.64 {q8},[r2]! @ load [rotated] I[0]
vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant
#ifndef __ARMEB__
vrev64.8 q8,q8
vrev64.8 q0,q0
#endif
vext.8 q3,q8,q8,#8 @ rotate I[0]
blo Lodd_tail_v8 @ r3 was less than 32
vld1.64 {q9},[r2],r12 @ load [rotated] I[1]
#ifndef __ARMEB__
vrev64.8 q9,q9
#endif
vext.8 q7,q9,q9,#8
veor q3,q3,q0 @ I[i]^=Xi
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q9,q9,q7 @ Karatsuba pre-processing
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
b Loop_mod2x_v8
.align 4
Loop_mod2x_v8:
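@ Two blocks are folded per iteration: the running Xi xored with block i is
@ multiplied by H^2 (q14) while block i+1 is multiplied by H (q12), so both
@ products share one Karatsuba combine and one reduction per 32 bytes of
@ input.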
vext.8 q10,q3,q3,#8
subs r3,r3,#32 @ is there more data?
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
movlo r12,#0 @ is it time to zero r12?
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
veor q10,q10,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
veor q0,q0,q4 @ accumulate
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
veor q2,q2,q6
moveq r12,#0 @ is it time to zero r12?
veor q1,q1,q5
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3]
#ifndef __ARMEB__
vrev64.8 q8,q8
#endif
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
#ifndef __ARMEB__
vrev64.8 q9,q9
#endif
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
vext.8 q7,q9,q9,#8
vext.8 q3,q8,q8,#8
veor q0,q1,q10
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q3,q3,q2 @ accumulate q3 early
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q3,q3,q10
veor q9,q9,q7 @ Karatsuba pre-processing
veor q3,q3,q0
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
bhs Loop_mod2x_v8 @ there was at least 32 more bytes
veor q2,q2,q10
vext.8 q3,q8,q8,#8 @ re-construct q3
adds r3,r3,#32 @ re-construct r3
veor q0,q0,q2 @ re-construct q0
beq Ldone_v8 @ is r3 zero?
Lodd_tail_v8:
vext.8 q10,q0,q0,#8
veor q3,q3,q0 @ inp^=Xi
veor q9,q8,q10 @ q9 is rotated inp^Xi
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q0,q0,q10
Ldone_v8:
#ifndef __ARMEB__
vrev64.8 q0,q0
#endif
vext.8 q0,q0,q0,#8
vst1.64 {q0},[r0] @ write out Xi
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
bx lr
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
wlsfx/bnbb | 6,253 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/ghash-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
#include <openssl/arm_arch.h>
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL
@ instructions are in ghashv8-armx.pl.)
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#define ldrplb ldrbpl
#define ldrneb ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if __ARM_MAX_ARCH__>=7
.globl _gcm_init_neon
.private_extern _gcm_init_neon
#ifdef __thumb2__
.thumb_func _gcm_init_neon
#endif
.align 4
_gcm_init_neon:
vld1.64 d7,[r1]! @ load H
vmov.i8 q8,#0xe1
vld1.64 d6,[r1]
vshl.i64 d17,#57
vshr.u64 d16,#63 @ t0=0xc2....01
vdup.8 q9,d7[7]
vshr.u64 d26,d6,#63
vshr.s8 q9,#7 @ broadcast carry bit
vshl.i64 q3,q3,#1
vand q8,q8,q9
vorr d7,d26 @ H<<<=1
veor q3,q3,q8 @ twisted H
vstmia r0,{q3}
bx lr @ bx lr
.globl _gcm_gmult_neon
.private_extern _gcm_gmult_neon
#ifdef __thumb2__
.thumb_func _gcm_gmult_neon
#endif
.align 4
_gcm_gmult_neon:
vld1.64 d7,[r0]! @ load Xi
vld1.64 d6,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q3,q3
#endif
vmov.i64 d31,#0x000000000000ffff
veor d28,d26,d27 @ Karatsuba pre-processing
mov r3,#16
b Lgmult_neon
.globl _gcm_ghash_neon
.private_extern _gcm_ghash_neon
#ifdef __thumb2__
.thumb_func _gcm_ghash_neon
#endif
.align 4
_gcm_ghash_neon:
vld1.64 d1,[r0]! @ load Xi
vld1.64 d0,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q0,q0
#endif
vmov.i64 d31,#0x000000000000ffff
veor d28,d26,d27 @ Karatsuba pre-processing
Loop_neon:
vld1.64 d7,[r2]! @ load inp
vld1.64 d6,[r2]!
#ifdef __ARMEL__
vrev64.8 q3,q3
#endif
veor q3,q0 @ inp^=Xi
Lgmult_neon:
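@ Without the ARMv8 PMULL instruction, each 64x64-bit carry-less multiply
@ is built from vmull.p8 (eight parallel 8x8-bit polynomial multiplies):
@ byte-rotated copies of the operands (the vext.8 #1..#4 below) produce
@ partial products that are masked with d29/d30/d31 to drop the lanes that
@ wrapped around, then shifted into place and xored together. Three such
@ multiplies (lo, hi, and the Karatsuba middle term via d28 = H.lo^H.hi)
@ form the full 128-bit product.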
vext.8 d16, d26, d26, #1 @ A1
vmull.p8 q8, d16, d6 @ F = A1*B
vext.8 d0, d6, d6, #1 @ B1
vmull.p8 q0, d26, d0 @ E = A*B1
vext.8 d18, d26, d26, #2 @ A2
vmull.p8 q9, d18, d6 @ H = A2*B
vext.8 d22, d6, d6, #2 @ B2
vmull.p8 q11, d26, d22 @ G = A*B2
vext.8 d20, d26, d26, #3 @ A3
veor q8, q8, q0 @ L = E + F
vmull.p8 q10, d20, d6 @ J = A3*B
vext.8 d0, d6, d6, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q0, d26, d0 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d6, d6, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d26, d22 @ K = A*B4
veor q10, q10, q0 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q0, d26, d6 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q0, q0, q8
veor q0, q0, q10
veor d6,d6,d7 @ Karatsuba pre-processing
vext.8 d16, d28, d28, #1 @ A1
vmull.p8 q8, d16, d6 @ F = A1*B
vext.8 d2, d6, d6, #1 @ B1
vmull.p8 q1, d28, d2 @ E = A*B1
vext.8 d18, d28, d28, #2 @ A2
vmull.p8 q9, d18, d6 @ H = A2*B
vext.8 d22, d6, d6, #2 @ B2
vmull.p8 q11, d28, d22 @ G = A*B2
vext.8 d20, d28, d28, #3 @ A3
veor q8, q8, q1 @ L = E + F
vmull.p8 q10, d20, d6 @ J = A3*B
vext.8 d2, d6, d6, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q1, d28, d2 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d6, d6, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d28, d22 @ K = A*B4
veor q10, q10, q1 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q1, d28, d6 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q1, q1, q8
veor q1, q1, q10
vext.8 d16, d27, d27, #1 @ A1
vmull.p8 q8, d16, d7 @ F = A1*B
vext.8 d4, d7, d7, #1 @ B1
vmull.p8 q2, d27, d4 @ E = A*B1
vext.8 d18, d27, d27, #2 @ A2
vmull.p8 q9, d18, d7 @ H = A2*B
vext.8 d22, d7, d7, #2 @ B2
vmull.p8 q11, d27, d22 @ G = A*B2
vext.8 d20, d27, d27, #3 @ A3
veor q8, q8, q2 @ L = E + F
vmull.p8 q10, d20, d7 @ J = A3*B
vext.8 d4, d7, d7, #3 @ B3
veor q9, q9, q11 @ M = G + H
vmull.p8 q2, d27, d4 @ I = A*B3
veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
vand d17, d17, d29
vext.8 d22, d7, d7, #4 @ B4
veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
vand d19, d19, d30
vmull.p8 q11, d27, d22 @ K = A*B4
veor q10, q10, q2 @ N = I + J
veor d16, d16, d17
veor d18, d18, d19
veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
vand d21, d21, d31
vext.8 q8, q8, q8, #15
veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
vmov.i64 d23, #0
vext.8 q9, q9, q9, #14
veor d20, d20, d21
vmull.p8 q2, d27, d7 @ D = A*B
vext.8 q11, q11, q11, #12
vext.8 q10, q10, q10, #13
veor q8, q8, q9
veor q10, q10, q11
veor q2, q2, q8
veor q2, q2, q10
veor q1,q1,q0 @ Karatsuba post-processing
veor q1,q1,q2
veor d1,d1,d2
veor d4,d4,d3 @ Xh|Xl - 256-bit result
@ equivalent of reduction_avx from ghash-x86_64.pl
vshl.i64 q9,q0,#57 @ 1st phase
vshl.i64 q10,q0,#62
veor q10,q10,q9 @
vshl.i64 q9,q0,#63
veor q10, q10, q9 @
veor d1,d1,d20 @
veor d4,d4,d21
vshr.u64 q10,q0,#1 @ 2nd phase
veor q2,q2,q0
veor q0,q0,q10 @
vshr.u64 q10,q10,#6
vshr.u64 q0,q0,#1 @
veor q0,q0,q2 @
veor q0,q0,q10 @
subs r3,#16
bne Loop_neon
#ifdef __ARMEL__
vrev64.8 q0,q0
#endif
sub r0,#16
vst1.64 d1,[r0]! @ write out Xi
vst1.64 d0,[r0]
bx lr @ bx lr
#endif
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
wlsfx/bnbb | 31,590 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/fipsmodule/sha1-armv4-large.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
#include <openssl/arm_arch.h>
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.globl _sha1_block_data_order_nohw
.private_extern _sha1_block_data_order_nohw
#ifdef __thumb2__
.thumb_func _sha1_block_data_order_nohw
#endif
.align 5
_sha1_block_data_order_nohw:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
Lloop:
ldr r8,LK_00_19
mov r14,sp
sub sp,sp,#15*4
mov r5,r5,ror#30
mov r6,r6,ror#30
mov r7,r7,ror#30 @ [6]
L_00_15:
#if __ARM_ARCH<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r6,r8,r6,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r4,r5 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r6,r8,r6,ror#2 @ E+=K_00_19
eor r10,r4,r5 @ F_xx_xx
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r3,r10,ror#2
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r5,r8,r5,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r3,r4 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r5,r8,r5,ror#2 @ E+=K_00_19
eor r10,r3,r4 @ F_xx_xx
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r7,r10,ror#2
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r4,r8,r4,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r7,r3 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r4,r8,r4,ror#2 @ E+=K_00_19
eor r10,r7,r3 @ F_xx_xx
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r6,r10,ror#2
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r3,r8,r3,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r6,r7 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r3,r8,r3,ror#2 @ E+=K_00_19
eor r10,r6,r7 @ F_xx_xx
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r5,r10,ror#2
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne L_00_15 @ [((11+4)*5+2)*3]
sub sp,sp,#25*4
#if __ARM_ARCH<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
add r6,r6,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
add r5,r5,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
add r4,r4,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,LK_20_39 @ [+15+16*4]
cmn sp,#0 @ [+3], clear carry to denote 20_39
L_20_39_or_60_79:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp @ preserve carry
#endif
bne L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs L_done @ [+((12+3)*5+2)*4], spare 300 bytes
ldr r8,LK_40_59
sub sp,sp,#20*4 @ [+2]
L_40_59:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r4,r10,ror#2 @ F_xx_xx
and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,LK_60_79
sub sp,sp,#20*4
cmp sp,#0 @ set carry to denote 60_79
b L_20_39_or_60_79 @ [+4], spare 300 bytes
L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
ldmia r0,{r8,r9,r10,r11,r12}
add r3,r8,r3
add r4,r9,r4
add r5,r10,r5,ror#2
add r6,r11,r6,ror#2
add r7,r12,r7,ror#2
stmia r0,{r3,r4,r5,r6,r7}
teq r1,r2
bne Lloop @ [+18], total 1307
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.align 5
LK_00_19:.word 0x5a827999
LK_20_39:.word 0x6ed9eba1
LK_40_59:.word 0x8f1bbcdc
LK_60_79:.word 0xca62c1d6
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 5
#if __ARM_MAX_ARCH__>=7
.globl _sha1_block_data_order_neon
.private_extern _sha1_block_data_order_neon
#ifdef __thumb2__
.thumb_func _sha1_block_data_order_neon
#endif
.align 4
_sha1_block_data_order_neon:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
@ dmb @ errata #451034 on early Cortex A8
@ vstmdb sp!,{d8-d15} @ ABI specification says so
mov r14,sp
sub r12,sp,#64
adr r8,LK_00_19
bic r12,r12,#15 @ align for 128-bit stores
ldmia r0,{r3,r4,r5,r6,r7} @ load context
mov sp,r12 @ alloca
vld1.8 {q0,q1},[r1]! @ handles unaligned
veor q15,q15,q15
vld1.8 {q2,q3},[r1]!
vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19
vrev32.8 q0,q0 @ yes, even on
vrev32.8 q1,q1 @ big-endian...
vrev32.8 q2,q2
vadd.i32 q8,q0,q14
vrev32.8 q3,q3
vadd.i32 q9,q1,q14
vst1.32 {q8},[r12,:128]!
vadd.i32 q10,q2,q14
vst1.32 {q9},[r12,:128]!
vst1.32 {q10},[r12,:128]!
ldr r9,[sp] @ big RAW stall
Loop_neon:
vext.8 q8,q0,q1,#8
bic r10,r6,r4
add r7,r7,r9
and r11,r5,r4
vadd.i32 q13,q3,q14
ldr r9,[sp,#4]
add r7,r7,r3,ror#27
vext.8 q12,q3,q15,#4
eor r11,r11,r10
mov r4,r4,ror#2
add r7,r7,r11
veor q8,q8,q0
bic r10,r5,r3
add r6,r6,r9
veor q12,q12,q2
and r11,r4,r3
ldr r9,[sp,#8]
veor q12,q12,q8
add r6,r6,r7,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q13,q15,q12,#4
bic r10,r4,r7
add r5,r5,r9
vadd.i32 q8,q12,q12
and r11,r3,r7
ldr r9,[sp,#12]
vsri.32 q8,q12,#31
add r5,r5,r6,ror#27
eor r11,r11,r10
mov r7,r7,ror#2
vshr.u32 q12,q13,#30
add r5,r5,r11
bic r10,r3,r6
vshl.u32 q13,q13,#2
add r4,r4,r9
and r11,r7,r6
veor q8,q8,q12
ldr r9,[sp,#16]
add r4,r4,r5,ror#27
veor q8,q8,q13
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q9,q1,q2,#8
bic r10,r7,r5
add r3,r3,r9
and r11,r6,r5
vadd.i32 q13,q8,q14
ldr r9,[sp,#20]
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r4,ror#27
vext.8 q12,q8,q15,#4
eor r11,r11,r10
mov r5,r5,ror#2
add r3,r3,r11
veor q9,q9,q1
bic r10,r6,r4
add r7,r7,r9
veor q12,q12,q3
and r11,r5,r4
ldr r9,[sp,#24]
veor q12,q12,q9
add r7,r7,r3,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q13,q15,q12,#4
bic r10,r5,r3
add r6,r6,r9
vadd.i32 q9,q12,q12
and r11,r4,r3
ldr r9,[sp,#28]
vsri.32 q9,q12,#31
add r6,r6,r7,ror#27
eor r11,r11,r10
mov r3,r3,ror#2
vshr.u32 q12,q13,#30
add r6,r6,r11
bic r10,r4,r7
vshl.u32 q13,q13,#2
add r5,r5,r9
and r11,r3,r7
veor q9,q9,q12
ldr r9,[sp,#32]
add r5,r5,r6,ror#27
veor q9,q9,q13
eor r11,r11,r10
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q10,q2,q3,#8
bic r10,r3,r6
add r4,r4,r9
and r11,r7,r6
vadd.i32 q13,q9,q14
ldr r9,[sp,#36]
add r4,r4,r5,ror#27
vext.8 q12,q9,q15,#4
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
veor q10,q10,q2
bic r10,r7,r5
add r3,r3,r9
veor q12,q12,q8
and r11,r6,r5
ldr r9,[sp,#40]
veor q12,q12,q10
add r3,r3,r4,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q13,q15,q12,#4
bic r10,r6,r4
add r7,r7,r9
vadd.i32 q10,q12,q12
and r11,r5,r4
ldr r9,[sp,#44]
vsri.32 q10,q12,#31
add r7,r7,r3,ror#27
eor r11,r11,r10
mov r4,r4,ror#2
vshr.u32 q12,q13,#30
add r7,r7,r11
bic r10,r5,r3
vshl.u32 q13,q13,#2
add r6,r6,r9
and r11,r4,r3
veor q10,q10,q12
ldr r9,[sp,#48]
add r6,r6,r7,ror#27
veor q10,q10,q13
eor r11,r11,r10
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q11,q3,q8,#8
bic r10,r4,r7
add r5,r5,r9
and r11,r3,r7
vadd.i32 q13,q10,q14
ldr r9,[sp,#52]
add r5,r5,r6,ror#27
vext.8 q12,q10,q15,#4
eor r11,r11,r10
mov r7,r7,ror#2
add r5,r5,r11
veor q11,q11,q3
bic r10,r3,r6
add r4,r4,r9
veor q12,q12,q9
and r11,r7,r6
ldr r9,[sp,#56]
veor q12,q12,q11
add r4,r4,r5,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q13,q15,q12,#4
bic r10,r7,r5
add r3,r3,r9
vadd.i32 q11,q12,q12
and r11,r6,r5
ldr r9,[sp,#60]
vsri.32 q11,q12,#31
add r3,r3,r4,ror#27
eor r11,r11,r10
mov r5,r5,ror#2
vshr.u32 q12,q13,#30
add r3,r3,r11
bic r10,r6,r4
vshl.u32 q13,q13,#2
add r7,r7,r9
and r11,r5,r4
veor q11,q11,q12
ldr r9,[sp,#0]
add r7,r7,r3,ror#27
veor q11,q11,q13
eor r11,r11,r10
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q10,q11,#8
bic r10,r5,r3
add r6,r6,r9
and r11,r4,r3
veor q0,q0,q8
ldr r9,[sp,#4]
add r6,r6,r7,ror#27
veor q0,q0,q1
eor r11,r11,r10
mov r3,r3,ror#2
vadd.i32 q13,q11,q14
add r6,r6,r11
bic r10,r4,r7
veor q12,q12,q0
add r5,r5,r9
and r11,r3,r7
vshr.u32 q0,q12,#30
ldr r9,[sp,#8]
add r5,r5,r6,ror#27
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
eor r11,r11,r10
mov r7,r7,ror#2
vsli.32 q0,q12,#2
add r5,r5,r11
bic r10,r3,r6
add r4,r4,r9
and r11,r7,r6
ldr r9,[sp,#12]
add r4,r4,r5,ror#27
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
bic r10,r7,r5
add r3,r3,r9
and r11,r6,r5
ldr r9,[sp,#16]
add r3,r3,r4,ror#27
eor r11,r11,r10
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q12,q11,q0,#8
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#20]
veor q1,q1,q9
eor r11,r10,r5
add r7,r7,r3,ror#27
veor q1,q1,q2
mov r4,r4,ror#2
add r7,r7,r11
vadd.i32 q13,q0,q14
eor r10,r3,r5
add r6,r6,r9
veor q12,q12,q1
ldr r9,[sp,#24]
eor r11,r10,r4
vshr.u32 q1,q12,#30
add r6,r6,r7,ror#27
mov r3,r3,ror#2
vst1.32 {q13},[r12,:128]!
add r6,r6,r11
eor r10,r7,r4
vsli.32 q1,q12,#2
add r5,r5,r9
ldr r9,[sp,#28]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#32]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q12,q0,q1,#8
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#36]
veor q2,q2,q10
eor r11,r10,r6
add r3,r3,r4,ror#27
veor q2,q2,q3
mov r5,r5,ror#2
add r3,r3,r11
vadd.i32 q13,q1,q14
eor r10,r4,r6
vld1.32 {d28[],d29[]},[r8,:32]!
add r7,r7,r9
veor q12,q12,q2
ldr r9,[sp,#40]
eor r11,r10,r5
vshr.u32 q2,q12,#30
add r7,r7,r3,ror#27
mov r4,r4,ror#2
vst1.32 {q13},[r12,:128]!
add r7,r7,r11
eor r10,r3,r5
vsli.32 q2,q12,#2
add r6,r6,r9
ldr r9,[sp,#44]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#48]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q12,q1,q2,#8
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#52]
veor q3,q3,q11
eor r11,r10,r7
add r4,r4,r5,ror#27
veor q3,q3,q8
mov r6,r6,ror#2
add r4,r4,r11
vadd.i32 q13,q2,q14
eor r10,r5,r7
add r3,r3,r9
veor q12,q12,q3
ldr r9,[sp,#56]
eor r11,r10,r6
vshr.u32 q3,q12,#30
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vst1.32 {q13},[r12,:128]!
add r3,r3,r11
eor r10,r4,r6
vsli.32 q3,q12,#2
add r7,r7,r9
ldr r9,[sp,#60]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#0]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q12,q2,q3,#8
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#4]
veor q8,q8,q0
eor r11,r10,r3
add r5,r5,r6,ror#27
veor q8,q8,q9
mov r7,r7,ror#2
add r5,r5,r11
vadd.i32 q13,q3,q14
eor r10,r6,r3
add r4,r4,r9
veor q12,q12,q8
ldr r9,[sp,#8]
eor r11,r10,r7
vshr.u32 q8,q12,#30
add r4,r4,r5,ror#27
mov r6,r6,ror#2
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
add r4,r4,r11
eor r10,r5,r7
vsli.32 q8,q12,#2
add r3,r3,r9
ldr r9,[sp,#12]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#16]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q3,q8,#8
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#20]
veor q9,q9,q1
eor r11,r10,r4
add r6,r6,r7,ror#27
veor q9,q9,q10
mov r3,r3,ror#2
add r6,r6,r11
vadd.i32 q13,q8,q14
eor r10,r7,r4
add r5,r5,r9
veor q12,q12,q9
ldr r9,[sp,#24]
eor r11,r10,r3
vshr.u32 q9,q12,#30
add r5,r5,r6,ror#27
mov r7,r7,ror#2
vst1.32 {q13},[r12,:128]!
add r5,r5,r11
eor r10,r6,r3
vsli.32 q9,q12,#2
add r4,r4,r9
ldr r9,[sp,#28]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#32]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q12,q8,q9,#8
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#36]
veor q10,q10,q2
add r7,r7,r3,ror#27
eor r11,r5,r6
veor q10,q10,q11
add r7,r7,r10
and r11,r11,r4
vadd.i32 q13,q9,q14
mov r4,r4,ror#2
add r7,r7,r11
veor q12,q12,q10
add r6,r6,r9
and r10,r4,r5
vshr.u32 q10,q12,#30
ldr r9,[sp,#40]
add r6,r6,r7,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r4,r5
add r6,r6,r10
vsli.32 q10,q12,#2
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#44]
add r5,r5,r6,ror#27
eor r11,r3,r4
add r5,r5,r10
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#48]
add r4,r4,r5,ror#27
eor r11,r7,r3
add r4,r4,r10
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q12,q9,q10,#8
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#52]
veor q11,q11,q3
add r3,r3,r4,ror#27
eor r11,r6,r7
veor q11,q11,q0
add r3,r3,r10
and r11,r11,r5
vadd.i32 q13,q10,q14
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r11
veor q12,q12,q11
add r7,r7,r9
and r10,r5,r6
vshr.u32 q11,q12,#30
ldr r9,[sp,#56]
add r7,r7,r3,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r5,r6
add r7,r7,r10
vsli.32 q11,q12,#2
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#60]
add r6,r6,r7,ror#27
eor r11,r4,r5
add r6,r6,r10
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#0]
add r5,r5,r6,ror#27
eor r11,r3,r4
add r5,r5,r10
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q12,q10,q11,#8
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#4]
veor q0,q0,q8
add r4,r4,r5,ror#27
eor r11,r7,r3
veor q0,q0,q1
add r4,r4,r10
and r11,r11,r6
vadd.i32 q13,q11,q14
mov r6,r6,ror#2
add r4,r4,r11
veor q12,q12,q0
add r3,r3,r9
and r10,r6,r7
vshr.u32 q0,q12,#30
ldr r9,[sp,#8]
add r3,r3,r4,ror#27
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
eor r11,r6,r7
add r3,r3,r10
vsli.32 q0,q12,#2
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#12]
add r7,r7,r3,ror#27
eor r11,r5,r6
add r7,r7,r10
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#16]
add r6,r6,r7,ror#27
eor r11,r4,r5
add r6,r6,r10
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q12,q11,q0,#8
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#20]
veor q1,q1,q9
add r5,r5,r6,ror#27
eor r11,r3,r4
veor q1,q1,q2
add r5,r5,r10
and r11,r11,r7
vadd.i32 q13,q0,q14
mov r7,r7,ror#2
add r5,r5,r11
veor q12,q12,q1
add r4,r4,r9
and r10,r7,r3
vshr.u32 q1,q12,#30
ldr r9,[sp,#24]
add r4,r4,r5,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r7,r3
add r4,r4,r10
vsli.32 q1,q12,#2
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#28]
add r3,r3,r4,ror#27
eor r11,r6,r7
add r3,r3,r10
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#32]
add r7,r7,r3,ror#27
eor r11,r5,r6
add r7,r7,r10
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q0,q1,#8
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#36]
veor q2,q2,q10
add r6,r6,r7,ror#27
eor r11,r4,r5
veor q2,q2,q3
add r6,r6,r10
and r11,r11,r3
vadd.i32 q13,q1,q14
mov r3,r3,ror#2
add r6,r6,r11
veor q12,q12,q2
add r5,r5,r9
and r10,r3,r4
vshr.u32 q2,q12,#30
ldr r9,[sp,#40]
add r5,r5,r6,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r3,r4
add r5,r5,r10
vsli.32 q2,q12,#2
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#44]
add r4,r4,r5,ror#27
eor r11,r7,r3
add r4,r4,r10
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#48]
add r3,r3,r4,ror#27
eor r11,r6,r7
add r3,r3,r10
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q12,q1,q2,#8
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#52]
veor q3,q3,q11
eor r11,r10,r5
add r7,r7,r3,ror#27
veor q3,q3,q8
mov r4,r4,ror#2
add r7,r7,r11
vadd.i32 q13,q2,q14
eor r10,r3,r5
add r6,r6,r9
veor q12,q12,q3
ldr r9,[sp,#56]
eor r11,r10,r4
vshr.u32 q3,q12,#30
add r6,r6,r7,ror#27
mov r3,r3,ror#2
vst1.32 {q13},[r12,:128]!
add r6,r6,r11
eor r10,r7,r4
vsli.32 q3,q12,#2
add r5,r5,r9
ldr r9,[sp,#60]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#0]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
vadd.i32 q13,q3,q14
eor r10,r5,r7
add r3,r3,r9
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
teq r1,r2
sub r8,r8,#16
it eq
subeq r1,r1,#64
vld1.8 {q0,q1},[r1]!
ldr r9,[sp,#4]
eor r11,r10,r6
vld1.8 {q2,q3},[r1]!
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r11
eor r10,r4,r6
vrev32.8 q0,q0
add r7,r7,r9
ldr r9,[sp,#8]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#12]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#16]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
vrev32.8 q1,q1
eor r10,r6,r3
add r4,r4,r9
vadd.i32 q8,q0,q14
ldr r9,[sp,#20]
eor r11,r10,r7
vst1.32 {q8},[r12,:128]!
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#24]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#28]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#32]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
vrev32.8 q2,q2
eor r10,r7,r4
add r5,r5,r9
vadd.i32 q9,q1,q14
ldr r9,[sp,#36]
eor r11,r10,r3
vst1.32 {q9},[r12,:128]!
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#40]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#44]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#48]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
vrev32.8 q3,q3
eor r10,r3,r5
add r6,r6,r9
vadd.i32 q10,q2,q14
ldr r9,[sp,#52]
eor r11,r10,r4
vst1.32 {q10},[r12,:128]!
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#56]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#60]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
ldmia r0,{r9,r10,r11,r12} @ accumulate context
add r3,r3,r9
ldr r9,[r0,#16]
add r4,r4,r10
add r5,r5,r11
add r6,r6,r12
it eq
moveq sp,r14
add r7,r7,r9
it ne
ldrne r9,[sp]
stmia r0,{r3,r4,r5,r6,r7}
itt ne
addne r12,sp,#3*16
bne Loop_neon
@ vldmia sp!,{d8-d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#endif
#if __ARM_MAX_ARCH__>=7
# if defined(__thumb2__)
# define INST(a,b,c,d) .byte c,d|0xf,a,b
# else
# define INST(a,b,c,d) .byte a,b,c,d|0x10
# endif
.globl _sha1_block_data_order_hw
.private_extern _sha1_block_data_order_hw
#ifdef __thumb2__
.thumb_func _sha1_block_data_order_hw
#endif
.align 5
_sha1_block_data_order_hw:
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
veor q1,q1,q1
adr r3,LK_00_19
vld1.32 {q0},[r0]!
vld1.32 {d2[0]},[r0]
sub r0,r0,#16
vld1.32 {d16[],d17[]},[r3,:32]!
vld1.32 {d18[],d19[]},[r3,:32]!
vld1.32 {d20[],d21[]},[r3,:32]!
vld1.32 {d22[],d23[]},[r3,:32]
Loop_v8:
vld1.8 {q4,q5},[r1]!
vld1.8 {q6,q7},[r1]!
vrev32.8 q4,q4
vrev32.8 q5,q5
vadd.i32 q12,q8,q4
vrev32.8 q6,q6
vmov q14,q0 @ offload
subs r2,r2,#1
vadd.i32 q13,q8,q5
vrev32.8 q7,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 0
INST(0x68,0x0c,0x02,0xe2) @ sha1c q0,q1,q12
vadd.i32 q12,q8,q6
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 1
INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13
vadd.i32 q13,q8,q7
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 2
INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12
vadd.i32 q12,q8,q4
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 3
INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13
vadd.i32 q13,q9,q5
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 4
INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12
vadd.i32 q12,q9,q6
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 5
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q9,q7
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 6
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
vadd.i32 q12,q9,q4
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 7
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q9,q5
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 8
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
vadd.i32 q12,q10,q6
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 9
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q10,q7
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 10
INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12
vadd.i32 q12,q10,q4
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 11
INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13
vadd.i32 q13,q10,q5
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 12
INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12
vadd.i32 q12,q10,q6
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 13
INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13
vadd.i32 q13,q11,q7
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 14
INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12
vadd.i32 q12,q11,q4
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 15
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q11,q5
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 16
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
vadd.i32 q12,q11,q6
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 17
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q11,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 18
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 19
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q1,q1,q2
vadd.i32 q0,q0,q14
bne Loop_v8
vst1.32 {q0},[r0]!
vst1.32 {d2[0]},[r0]
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
bx lr @ bx lr
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
wlsfx/bnbb | 28,701 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/ios-arm/crypto/chacha/chacha-armv4.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__APPLE__)
#include <openssl/arm_arch.h>
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
#if defined(__thumb2__) || defined(__clang__)
#define ldrhsb ldrbhs
#endif
.align 5
Lsigma:
.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral
Lone:
.long 1,0,0,0
.globl _ChaCha20_ctr32_nohw
.private_extern _ChaCha20_ctr32_nohw
#ifdef __thumb2__
.thumb_func _ChaCha20_ctr32_nohw
#endif
.align 5
_ChaCha20_ctr32_nohw:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
adr r14,Lsigma
ldmia r12,{r4,r5,r6,r7} @ load counter and nonce
sub sp,sp,#4*(16) @ off-load area
stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
ldmia r14,{r0,r1,r2,r3} @ load sigma
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key
stmdb sp!,{r0,r1,r2,r3} @ copy sigma
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
b Loop_outer_enter
.align 4
Loop_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
str r11,[sp,#4*(32+2)] @ save len
str r12, [sp,#4*(32+1)] @ save inp
str r14, [sp,#4*(32+0)] @ save out
Loop_outer_enter:
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(16+15)]
mov r11,#10
b Loop
.align 4
Loop:
subs r11,r11,#1
add r0,r0,r4
mov r12,r12,ror#16
add r1,r1,r5
mov r10,r10,ror#16
eor r12,r12,r0,ror#16
eor r10,r10,r1,ror#16
add r8,r8,r12
mov r4,r4,ror#20
add r9,r9,r10
mov r5,r5,ror#20
eor r4,r4,r8,ror#20
eor r5,r5,r9,ror#20
add r0,r0,r4
mov r12,r12,ror#24
add r1,r1,r5
mov r10,r10,ror#24
eor r12,r12,r0,ror#24
eor r10,r10,r1,ror#24
add r8,r8,r12
mov r4,r4,ror#25
add r9,r9,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+13)]
ldr r10,[sp,#4*(16+15)]
eor r4,r4,r8,ror#25
eor r5,r5,r9,ror#25
str r8,[sp,#4*(16+8)]
ldr r8,[sp,#4*(16+10)]
add r2,r2,r6
mov r14,r14,ror#16
str r9,[sp,#4*(16+9)]
ldr r9,[sp,#4*(16+11)]
add r3,r3,r7
mov r10,r10,ror#16
eor r14,r14,r2,ror#16
eor r10,r10,r3,ror#16
add r8,r8,r14
mov r6,r6,ror#20
add r9,r9,r10
mov r7,r7,ror#20
eor r6,r6,r8,ror#20
eor r7,r7,r9,ror#20
add r2,r2,r6
mov r14,r14,ror#24
add r3,r3,r7
mov r10,r10,ror#24
eor r14,r14,r2,ror#24
eor r10,r10,r3,ror#24
add r8,r8,r14
mov r6,r6,ror#25
add r9,r9,r10
mov r7,r7,ror#25
eor r6,r6,r8,ror#25
eor r7,r7,r9,ror#25
add r0,r0,r5
mov r10,r10,ror#16
add r1,r1,r6
mov r12,r12,ror#16
eor r10,r10,r0,ror#16
eor r12,r12,r1,ror#16
add r8,r8,r10
mov r5,r5,ror#20
add r9,r9,r12
mov r6,r6,ror#20
eor r5,r5,r8,ror#20
eor r6,r6,r9,ror#20
add r0,r0,r5
mov r10,r10,ror#24
add r1,r1,r6
mov r12,r12,ror#24
eor r10,r10,r0,ror#24
eor r12,r12,r1,ror#24
add r8,r8,r10
mov r5,r5,ror#25
str r10,[sp,#4*(16+15)]
ldr r10,[sp,#4*(16+13)]
add r9,r9,r12
mov r6,r6,ror#25
eor r5,r5,r8,ror#25
eor r6,r6,r9,ror#25
str r8,[sp,#4*(16+10)]
ldr r8,[sp,#4*(16+8)]
add r2,r2,r7
mov r10,r10,ror#16
str r9,[sp,#4*(16+11)]
ldr r9,[sp,#4*(16+9)]
add r3,r3,r4
mov r14,r14,ror#16
eor r10,r10,r2,ror#16
eor r14,r14,r3,ror#16
add r8,r8,r10
mov r7,r7,ror#20
add r9,r9,r14
mov r4,r4,ror#20
eor r7,r7,r8,ror#20
eor r4,r4,r9,ror#20
add r2,r2,r7
mov r10,r10,ror#24
add r3,r3,r4
mov r14,r14,ror#24
eor r10,r10,r2,ror#24
eor r14,r14,r3,ror#24
add r8,r8,r10
mov r7,r7,ror#25
add r9,r9,r14
mov r4,r4,ror#25
eor r7,r7,r8,ror#25
eor r4,r4,r9,ror#25
bne Loop
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
cmp r11,#64 @ done yet?
#ifdef __thumb2__
itete lo
#endif
addlo r12,sp,#4*(0) @ shortcut or ...
ldrhs r12,[sp,#4*(32+1)] @ ... load inp
addlo r14,sp,#4*(0) @ shortcut or ...
ldrhs r14,[sp,#4*(32+0)] @ ... load out
ldr r8,[sp,#4*(0)] @ load key material
ldr r9,[sp,#4*(1)]
#if __ARM_ARCH>=6 || !defined(__ARMEB__)
# if __ARM_ARCH<7
orr r10,r12,r14
tst r10,#3 @ are input and output aligned?
ldr r10,[sp,#4*(2)]
bne Lunaligned
cmp r11,#64 @ restore flags
# else
ldr r10,[sp,#4*(2)]
# endif
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8 @ xor with input
eorhs r1,r1,r9
add r8,sp,#4*(4)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r1,[r14,#-12]
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
add r8,sp,#4*(8)
str r4,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r5,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r2,r2,r10
add r3,r3,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r0,r0,r8
eorhs r1,r1,r9
add r8,sp,#4*(12)
str r0,[r14],#16 @ store output
# ifdef __thumb2__
itt hs
# endif
eorhs r2,r2,r10
eorhs r3,r3,r11
str r1,[r14,#-12]
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
# ifdef __thumb2__
itt hs
# endif
ldrhs r8,[r12],#16 @ load input
ldrhs r9,[r12,#-12]
add r6,r6,r10
add r7,r7,r11
# ifdef __thumb2__
itt hs
# endif
ldrhs r10,[r12,#-8]
ldrhs r11,[r12,#-4]
# if __ARM_ARCH>=6 && defined(__ARMEB__)
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
# ifdef __thumb2__
itt hs
# endif
eorhs r4,r4,r8
eorhs r5,r5,r9
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
itt hs
# endif
eorhs r6,r6,r10
eorhs r7,r7,r11
str r4,[r14],#16 @ store output
str r5,[r14,#-12]
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi Loop_outer
beq Ldone
# if __ARM_ARCH<7
b Ltail
.align 4
Lunaligned:@ unaligned endian-neutral path
cmp r11,#64 @ restore flags
# endif
#endif
#if __ARM_ARCH<7
ldr r11,[sp,#4*(3)]
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+0)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r0,sp,#4*(16+8)
add r4,r4,r8 @ accumulate key material
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
add r8,sp,#4*(4+4)
ldmia r8,{r8,r9,r10,r11} @ load key material
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
# ifdef __thumb2__
itt hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx"
strhi r11,[sp,#4*(16+11)] @ copy "rx"
add r0,r0,r8 @ accumulate key material
add r1,r1,r9
add r2,r2,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r3,r3,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r0,r8,r0 @ xor with input (or zero)
eor r1,r9,r1
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r2,r10,r2
strb r0,[r14],#16 @ store output
eor r3,r11,r3
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r1,[r14,#-12]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-8]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r3,[r14,#-4]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-15]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r1,[r14,#-11]
eor r0,r8,r0,lsr#8
strb r2,[r14,#-7]
eor r1,r9,r1,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r3,[r14,#-3]
eor r2,r10,r2,lsr#8
strb r0,[r14,#-14]
eor r3,r11,r3,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r1,[r14,#-10]
strb r2,[r14,#-6]
eor r0,r8,r0,lsr#8
strb r3,[r14,#-2]
eor r1,r9,r1,lsr#8
strb r0,[r14,#-13]
eor r2,r10,r2,lsr#8
strb r1,[r14,#-9]
eor r3,r11,r3,lsr#8
strb r2,[r14,#-5]
strb r3,[r14,#-1]
add r8,sp,#4*(4+8)
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
# ifdef __thumb2__
itt hi
# endif
addhi r8,r8,#1 @ next counter value
strhi r8,[sp,#4*(12)] @ save next counter value
add r5,r5,r9
add r6,r6,r10
# ifdef __thumb2__
itete lo
# endif
eorlo r8,r8,r8 @ zero or ...
ldrhsb r8,[r12],#16 @ ... load input
eorlo r9,r9,r9
ldrhsb r9,[r12,#-12]
add r7,r7,r11
# ifdef __thumb2__
itete lo
# endif
eorlo r10,r10,r10
ldrhsb r10,[r12,#-8]
eorlo r11,r11,r11
ldrhsb r11,[r12,#-4]
eor r4,r8,r4 @ xor with input (or zero)
eor r5,r9,r5
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-15] @ load more input
ldrhsb r9,[r12,#-11]
eor r6,r10,r6
strb r4,[r14],#16 @ store output
eor r7,r11,r7
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-7]
ldrhsb r11,[r12,#-3]
strb r5,[r14,#-12]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-8]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-14] @ load more input
ldrhsb r9,[r12,#-10]
strb r7,[r14,#-4]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-15]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-6]
ldrhsb r11,[r12,#-2]
strb r5,[r14,#-11]
eor r4,r8,r4,lsr#8
strb r6,[r14,#-7]
eor r5,r9,r5,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r8,[r12,#-13] @ load more input
ldrhsb r9,[r12,#-9]
strb r7,[r14,#-3]
eor r6,r10,r6,lsr#8
strb r4,[r14,#-14]
eor r7,r11,r7,lsr#8
# ifdef __thumb2__
itt hs
# endif
ldrhsb r10,[r12,#-5]
ldrhsb r11,[r12,#-1]
strb r5,[r14,#-10]
strb r6,[r14,#-6]
eor r4,r8,r4,lsr#8
strb r7,[r14,#-2]
eor r5,r9,r5,lsr#8
strb r4,[r14,#-13]
eor r6,r10,r6,lsr#8
strb r5,[r14,#-9]
eor r7,r11,r7,lsr#8
strb r6,[r14,#-5]
strb r7,[r14,#-1]
# ifdef __thumb2__
it ne
# endif
ldrne r8,[sp,#4*(32+2)] @ re-load len
# ifdef __thumb2__
it hs
# endif
subhs r11,r8,#64 @ len-=64
bhi Loop_outer
beq Ldone
#endif
Ltail:
ldr r12,[sp,#4*(32+1)] @ load inp
add r9,sp,#4*(0)
ldr r14,[sp,#4*(32+0)] @ load out
Loop_tail:
ldrb r10,[r9],#1 @ read buffer on stack
ldrb r11,[r12],#1 @ read input
subs r8,r8,#1
eor r11,r11,r10
strb r11,[r14],#1 @ store output
bne Loop_tail
Ldone:
add sp,sp,#4*(32+3)
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#if __ARM_MAX_ARCH__>=7
.globl _ChaCha20_ctr32_neon
.private_extern _ChaCha20_ctr32_neon
#ifdef __thumb2__
.thumb_func _ChaCha20_ctr32_neon
#endif
.align 5
_ChaCha20_ctr32_neon:
ldr r12,[sp,#0] @ pull pointer to counter and nonce
stmdb sp!,{r0,r1,r2,r4-r11,lr}
adr r14,Lsigma
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so
stmdb sp!,{r0,r1,r2,r3}
vld1.32 {q1,q2},[r3] @ load key
ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key
sub sp,sp,#4*(16+16)
vld1.32 {q3},[r12] @ load counter and nonce
add r12,sp,#4*8
ldmia r14,{r0,r1,r2,r3} @ load sigma
vld1.32 {q0},[r14]! @ load sigma
vld1.32 {q12},[r14] @ one
vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce
vst1.32 {q0,q1},[sp] @ copy sigma|1/2key
str r10,[sp,#4*(16+10)] @ off-load "rx"
str r11,[sp,#4*(16+11)] @ off-load "rx"
vshl.i32 d26,d24,#1 @ two
vstr d24,[sp,#4*(16+0)]
vshl.i32 d28,d24,#2 @ four
vstr d26,[sp,#4*(16+2)]
vmov q4,q0
vstr d28,[sp,#4*(16+4)]
vmov q8,q0
vmov q5,q1
vmov q9,q1
b Loop_neon_enter
.align 4
Loop_neon_outer:
ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material
cmp r11,#64*2 @ if len<=64*2
bls Lbreak_neon @ switch to integer-only
vmov q4,q0
str r11,[sp,#4*(32+2)] @ save len
vmov q8,q0
str r12, [sp,#4*(32+1)] @ save inp
vmov q5,q1
str r14, [sp,#4*(32+0)] @ save out
vmov q9,q1
Loop_neon_enter:
ldr r11, [sp,#4*(15)]
vadd.i32 q7,q3,q12 @ counter+1
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
vmov q6,q2
ldr r10, [sp,#4*(13)]
vmov q10,q2
ldr r14,[sp,#4*(14)]
vadd.i32 q11,q7,q12 @ counter+2
str r11, [sp,#4*(16+15)]
mov r11,#10
add r12,r12,#3 @ counter+3
b Loop_neon
.align 4
Loop_neon:
subs r11,r11,#1
vadd.i32 q0,q0,q1
add r0,r0,r4
vadd.i32 q4,q4,q5
mov r12,r12,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r5
veor q3,q3,q0
mov r10,r10,ror#16
veor q7,q7,q4
eor r12,r12,r0,ror#16
veor q11,q11,q8
eor r10,r10,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r12
vrev32.16 q7,q7
mov r4,r4,ror#20
vrev32.16 q11,q11
add r9,r9,r10
vadd.i32 q2,q2,q3
mov r5,r5,ror#20
vadd.i32 q6,q6,q7
eor r4,r4,r8,ror#20
vadd.i32 q10,q10,q11
eor r5,r5,r9,ror#20
veor q12,q1,q2
add r0,r0,r4
veor q13,q5,q6
mov r12,r12,ror#24
veor q14,q9,q10
add r1,r1,r5
vshr.u32 q1,q12,#20
mov r10,r10,ror#24
vshr.u32 q5,q13,#20
eor r12,r12,r0,ror#24
vshr.u32 q9,q14,#20
eor r10,r10,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r12
vsli.32 q5,q13,#12
mov r4,r4,ror#25
vsli.32 q9,q14,#12
add r9,r9,r10
vadd.i32 q0,q0,q1
mov r5,r5,ror#25
vadd.i32 q4,q4,q5
str r10,[sp,#4*(16+13)]
vadd.i32 q8,q8,q9
ldr r10,[sp,#4*(16+15)]
veor q12,q3,q0
eor r4,r4,r8,ror#25
veor q13,q7,q4
eor r5,r5,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+8)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+10)]
vshr.u32 q7,q13,#24
add r2,r2,r6
vshr.u32 q11,q14,#24
mov r14,r14,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+9)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+11)]
vsli.32 q11,q14,#8
add r3,r3,r7
vadd.i32 q2,q2,q3
mov r10,r10,ror#16
vadd.i32 q6,q6,q7
eor r14,r14,r2,ror#16
vadd.i32 q10,q10,q11
eor r10,r10,r3,ror#16
veor q12,q1,q2
add r8,r8,r14
veor q13,q5,q6
mov r6,r6,ror#20
veor q14,q9,q10
add r9,r9,r10
vshr.u32 q1,q12,#25
mov r7,r7,ror#20
vshr.u32 q5,q13,#25
eor r6,r6,r8,ror#20
vshr.u32 q9,q14,#25
eor r7,r7,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r6
vsli.32 q5,q13,#7
mov r14,r14,ror#24
vsli.32 q9,q14,#7
add r3,r3,r7
vext.8 q2,q2,q2,#8
mov r10,r10,ror#24
vext.8 q6,q6,q6,#8
eor r14,r14,r2,ror#24
vext.8 q10,q10,q10,#8
eor r10,r10,r3,ror#24
vext.8 q1,q1,q1,#4
add r8,r8,r14
vext.8 q5,q5,q5,#4
mov r6,r6,ror#25
vext.8 q9,q9,q9,#4
add r9,r9,r10
vext.8 q3,q3,q3,#12
mov r7,r7,ror#25
vext.8 q7,q7,q7,#12
eor r6,r6,r8,ror#25
vext.8 q11,q11,q11,#12
eor r7,r7,r9,ror#25
vadd.i32 q0,q0,q1
add r0,r0,r5
vadd.i32 q4,q4,q5
mov r10,r10,ror#16
vadd.i32 q8,q8,q9
add r1,r1,r6
veor q3,q3,q0
mov r12,r12,ror#16
veor q7,q7,q4
eor r10,r10,r0,ror#16
veor q11,q11,q8
eor r12,r12,r1,ror#16
vrev32.16 q3,q3
add r8,r8,r10
vrev32.16 q7,q7
mov r5,r5,ror#20
vrev32.16 q11,q11
add r9,r9,r12
vadd.i32 q2,q2,q3
mov r6,r6,ror#20
vadd.i32 q6,q6,q7
eor r5,r5,r8,ror#20
vadd.i32 q10,q10,q11
eor r6,r6,r9,ror#20
veor q12,q1,q2
add r0,r0,r5
veor q13,q5,q6
mov r10,r10,ror#24
veor q14,q9,q10
add r1,r1,r6
vshr.u32 q1,q12,#20
mov r12,r12,ror#24
vshr.u32 q5,q13,#20
eor r10,r10,r0,ror#24
vshr.u32 q9,q14,#20
eor r12,r12,r1,ror#24
vsli.32 q1,q12,#12
add r8,r8,r10
vsli.32 q5,q13,#12
mov r5,r5,ror#25
vsli.32 q9,q14,#12
str r10,[sp,#4*(16+15)]
vadd.i32 q0,q0,q1
ldr r10,[sp,#4*(16+13)]
vadd.i32 q4,q4,q5
add r9,r9,r12
vadd.i32 q8,q8,q9
mov r6,r6,ror#25
veor q12,q3,q0
eor r5,r5,r8,ror#25
veor q13,q7,q4
eor r6,r6,r9,ror#25
veor q14,q11,q8
str r8,[sp,#4*(16+10)]
vshr.u32 q3,q12,#24
ldr r8,[sp,#4*(16+8)]
vshr.u32 q7,q13,#24
add r2,r2,r7
vshr.u32 q11,q14,#24
mov r10,r10,ror#16
vsli.32 q3,q12,#8
str r9,[sp,#4*(16+11)]
vsli.32 q7,q13,#8
ldr r9,[sp,#4*(16+9)]
vsli.32 q11,q14,#8
add r3,r3,r4
vadd.i32 q2,q2,q3
mov r14,r14,ror#16
vadd.i32 q6,q6,q7
eor r10,r10,r2,ror#16
vadd.i32 q10,q10,q11
eor r14,r14,r3,ror#16
veor q12,q1,q2
add r8,r8,r10
veor q13,q5,q6
mov r7,r7,ror#20
veor q14,q9,q10
add r9,r9,r14
vshr.u32 q1,q12,#25
mov r4,r4,ror#20
vshr.u32 q5,q13,#25
eor r7,r7,r8,ror#20
vshr.u32 q9,q14,#25
eor r4,r4,r9,ror#20
vsli.32 q1,q12,#7
add r2,r2,r7
vsli.32 q5,q13,#7
mov r10,r10,ror#24
vsli.32 q9,q14,#7
add r3,r3,r4
vext.8 q2,q2,q2,#8
mov r14,r14,ror#24
vext.8 q6,q6,q6,#8
eor r10,r10,r2,ror#24
vext.8 q10,q10,q10,#8
eor r14,r14,r3,ror#24
vext.8 q1,q1,q1,#12
add r8,r8,r10
vext.8 q5,q5,q5,#12
mov r7,r7,ror#25
vext.8 q9,q9,q9,#12
add r9,r9,r14
vext.8 q3,q3,q3,#4
mov r4,r4,ror#25
vext.8 q7,q7,q7,#4
eor r7,r7,r8,ror#25
vext.8 q11,q11,q11,#4
eor r4,r4,r9,ror#25
bne Loop_neon
add r11,sp,#32
vld1.32 {q12,q13},[sp] @ load key material
vld1.32 {q14,q15},[r11]
ldr r11,[sp,#4*(32+2)] @ load len
str r8, [sp,#4*(16+8)] @ modulo-scheduled store
str r9, [sp,#4*(16+9)]
str r12,[sp,#4*(16+12)]
str r10, [sp,#4*(16+13)]
str r14,[sp,#4*(16+14)]
@ at this point we have first half of 512-bit result in
@ rx and second half at sp+4*(16+8)
ldr r12,[sp,#4*(32+1)] @ load inp
ldr r14,[sp,#4*(32+0)] @ load out
vadd.i32 q0,q0,q12 @ accumulate key material
vadd.i32 q4,q4,q12
vadd.i32 q8,q8,q12
vldr d24,[sp,#4*(16+0)] @ one
vadd.i32 q1,q1,q13
vadd.i32 q5,q5,q13
vadd.i32 q9,q9,q13
vldr d26,[sp,#4*(16+2)] @ two
vadd.i32 q2,q2,q14
vadd.i32 q6,q6,q14
vadd.i32 q10,q10,q14
vadd.i32 d14,d14,d24 @ counter+1
vadd.i32 d22,d22,d26 @ counter+2
vadd.i32 q3,q3,q15
vadd.i32 q7,q7,q15
vadd.i32 q11,q11,q15
cmp r11,#64*4
blo Ltail_neon
vld1.8 {q12,q13},[r12]! @ load input
mov r11,sp
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12 @ xor with input
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
vst1.8 {q0,q1},[r14]! @ store output
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vld1.32 {q0,q1},[r11]! @ load for next iteration
veor d25,d25,d25
vldr d24,[sp,#4*(16+4)] @ four
veor q9,q9,q13
vld1.32 {q2,q3},[r11]
veor q10,q10,q14
vst1.8 {q4,q5},[r14]!
veor q11,q11,q15
vst1.8 {q6,q7},[r14]!
vadd.i32 d6,d6,d24 @ next counter value
vldr d24,[sp,#4*(16+0)] @ one
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
vst1.8 {q8,q9},[r14]!
add r1,r1,r9
ldr r9,[r12,#-12]
vst1.8 {q10,q11},[r14]!
add r2,r2,r10
ldr r10,[r12,#-8]
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8 @ xor with input
add r8,sp,#4*(4)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r5,r5,r9
ldr r9,[r12,#-12]
add r6,r6,r10
ldr r10,[r12,#-8]
add r7,r7,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
add r8,sp,#4*(8)
eor r5,r5,r9
str r4,[r14],#16 @ store output
eor r6,r6,r10
str r5,[r14,#-12]
eor r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r6,[r14,#-8]
add r0,sp,#4*(16+8)
str r7,[r14,#-4]
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
ldr r8,[r12],#16 @ load input
add r1,r1,r9
ldr r9,[r12,#-12]
# ifdef __thumb2__
it hi
# endif
strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it
add r2,r2,r10
ldr r10,[r12,#-8]
# ifdef __thumb2__
it hi
# endif
strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it
add r3,r3,r11
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
eor r0,r0,r8
add r8,sp,#4*(12)
eor r1,r1,r9
str r0,[r14],#16 @ store output
eor r2,r2,r10
str r1,[r14,#-12]
eor r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
str r2,[r14,#-8]
str r3,[r14,#-4]
add r4,r4,r8 @ accumulate key material
add r8,r8,#4 @ next counter value
add r5,r5,r9
str r8,[sp,#4*(12)] @ save next counter value
ldr r8,[r12],#16 @ load input
add r6,r6,r10
add r4,r4,#3 @ counter+3
ldr r9,[r12,#-12]
add r7,r7,r11
ldr r10,[r12,#-8]
ldr r11,[r12,#-4]
# ifdef __ARMEB__
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
eor r4,r4,r8
# ifdef __thumb2__
it hi
# endif
ldrhi r8,[sp,#4*(32+2)] @ re-load len
eor r5,r5,r9
eor r6,r6,r10
str r4,[r14],#16 @ store output
eor r7,r7,r11
str r5,[r14,#-12]
sub r11,r8,#64*4 @ len-=64*4
str r6,[r14,#-8]
str r7,[r14,#-4]
bhi Loop_neon_outer
b Ldone_neon
.align 4
Lbreak_neon:
@ harmonize NEON and integer-only stack frames: load data
@ from NEON frame, but save to integer-only one; distance
@ between the two is 4*(32+4+16-32)=4*(20).
str r11, [sp,#4*(20+32+2)] @ save len
add r11,sp,#4*(32+4)
str r12, [sp,#4*(20+32+1)] @ save inp
str r14, [sp,#4*(20+32+0)] @ save out
ldr r12,[sp,#4*(16+10)]
ldr r14,[sp,#4*(16+11)]
vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement
str r12,[sp,#4*(20+16+10)] @ copy "rx"
str r14,[sp,#4*(20+16+11)] @ copy "rx"
ldr r11, [sp,#4*(15)]
ldr r12,[sp,#4*(12)] @ modulo-scheduled load
ldr r10, [sp,#4*(13)]
ldr r14,[sp,#4*(14)]
str r11, [sp,#4*(20+16+15)]
add r11,sp,#4*(20)
vst1.32 {q0,q1},[r11]! @ copy key
add sp,sp,#4*(20) @ switch frame
vst1.32 {q2,q3},[r11]
mov r11,#10
b Loop @ go integer-only
.align 4
Ltail_neon:
cmp r11,#64*3
bhs L192_or_more_neon
cmp r11,#64*2
bhs L128_or_more_neon
cmp r11,#64*1
bhs L64_or_more_neon
add r8,sp,#4*(8)
vst1.8 {q0,q1},[sp]
add r10,sp,#4*(0)
vst1.8 {q2,q3},[r8]
b Loop_tail_neon
.align 4
L64_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
veor q2,q2,q14
veor q3,q3,q15
vst1.8 {q0,q1},[r14]!
vst1.8 {q2,q3},[r14]!
beq Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q4,q5},[sp]
add r10,sp,#4*(0)
vst1.8 {q6,q7},[r8]
sub r11,r11,#64*1 @ len-=64*1
b Loop_tail_neon
.align 4
L128_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vst1.8 {q0,q1},[r14]!
veor q6,q6,q14
vst1.8 {q2,q3},[r14]!
veor q7,q7,q15
vst1.8 {q4,q5},[r14]!
vst1.8 {q6,q7},[r14]!
beq Ldone_neon
add r8,sp,#4*(8)
vst1.8 {q8,q9},[sp]
add r10,sp,#4*(0)
vst1.8 {q10,q11},[r8]
sub r11,r11,#64*2 @ len-=64*2
b Loop_tail_neon
.align 4
L192_or_more_neon:
vld1.8 {q12,q13},[r12]!
vld1.8 {q14,q15},[r12]!
veor q0,q0,q12
veor q1,q1,q13
vld1.8 {q12,q13},[r12]!
veor q2,q2,q14
veor q3,q3,q15
vld1.8 {q14,q15},[r12]!
veor q4,q4,q12
veor q5,q5,q13
vld1.8 {q12,q13},[r12]!
veor q6,q6,q14
vst1.8 {q0,q1},[r14]!
veor q7,q7,q15
vld1.8 {q14,q15},[r12]!
veor q8,q8,q12
vst1.8 {q2,q3},[r14]!
veor q9,q9,q13
vst1.8 {q4,q5},[r14]!
veor q10,q10,q14
vst1.8 {q6,q7},[r14]!
veor q11,q11,q15
vst1.8 {q8,q9},[r14]!
vst1.8 {q10,q11},[r14]!
beq Ldone_neon
ldmia sp,{r8,r9,r10,r11} @ load key material
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(4)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r6,r6,r10
add r7,r7,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7}
add r0,sp,#4*(16+8)
ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half
add r0,r0,r8 @ accumulate key material
add r8,sp,#4*(12)
add r1,r1,r9
add r2,r2,r10
add r3,r3,r11
ldmia r8,{r8,r9,r10,r11} @ load key material
add r4,r4,r8 @ accumulate key material
add r8,sp,#4*(8)
add r5,r5,r9
add r4,r4,#3 @ counter+3
add r6,r6,r10
add r7,r7,r11
ldr r11,[sp,#4*(32+2)] @ re-load len
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
rev r7,r7
# endif
stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7}
add r10,sp,#4*(0)
sub r11,r11,#64*3 @ len-=64*3
Loop_tail_neon:
ldrb r8,[r10],#1 @ read buffer on stack
ldrb r9,[r12],#1 @ read input
subs r11,r11,#1
eor r8,r8,r9
strb r8,[r14],#1 @ store output
bne Loop_tail_neon
Ldone_neon:
add sp,sp,#4*(32+4)
vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15}
add sp,sp,#4*(16+3)
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__APPLE__)
|
WSFcloud/rCore-OS
| 2,218
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_k
.globl __restore_k
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
.align 2
__alltraps_k:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
csrr t2, sscratch
jalr t2
__restore_k:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
sret
|
wust-2025oscomp-NovaOS/NovaOS
| 2,218
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_k
.globl __restore_k
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
.align 2
__alltraps_k:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
csrr t2, sscratch
jalr t2
__restore_k:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
sret
|
XajilX/my_rcore
| 1,543
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __trap_entry
.globl __restore
.globl __trap_entry_k
.globl __restore_k
.align 2
__trap_entry:
csrrw sp, sscratch, sp
sd x1, 8(sp)
sd x3, 24(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
csrr t2, sscratch
sd t2, 16(sp)
# satp
ld t0, 34*8(sp)
# trap_handler
ld t1, 36*8(sp)
# kernel sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
jr t1
__restore:
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 8(sp)
ld x3, 24(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
ld sp, 16(sp)
sret
.align 2
__trap_entry_k:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
csrr t2, sscratch
jalr t2
__restore_k:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
sret
|
xavierrouth/rustc-cs256
| 11,809
|
library/std/src/sys/pal/sgx/abi/entry.S
|
/* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:
.section ".note.x86_64-fortanix-unknown-sgx", "", @note
.align 4
.long 1f - 0f /* name length (not including padding) */
.long 3f - 2f /* desc length (not including padding) */
.long 1 /* type = NT_VERSION */
0: .asciz "toolchain-version" /* name */
1: .align 4
2: .long 1 /* desc - toolchain version number, 32-bit LE */
3: .align 4
.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/* RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24
.Lxsave_mxcsr:
.short 0x1fbf
/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */
/* When using this macro, don't forget to adjust the linker version script! */
.macro globvar name:req size:req
.global \name
.protected \name
.align \size
.size \name , \size
\name :
.org .+\size
.endm
/* The base address (relative to enclave start) of the heap area */
globvar HEAP_BASE 8
/* The heap size in bytes */
globvar HEAP_SIZE 8
/* Value of the RELA entry in the dynamic table */
globvar RELA 8
/* Value of the RELACOUNT entry in the dynamic table */
globvar RELACOUNT 8
/* The enclave size in bytes */
globvar ENCLAVE_SIZE 8
/* The base address (relative to enclave start) of the enclave configuration area */
globvar CFGDATA_BASE 8
/* Non-zero if debugging is enabled, zero otherwise */
globvar DEBUG 1
/* The base address (relative to enclave start) of the enclave text section */
globvar TEXT_BASE 8
/* The size in bytes of enclave text section */
globvar TEXT_SIZE 8
/* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_OFFSET 8
/* The size in bytes of enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_LEN 8
/* The base address (relative to enclave start) of the enclave .eh_frame section */
globvar EH_FRM_OFFSET 8
/* The size in bytes of enclave .eh_frame section */
globvar EH_FRM_LEN 8
.org .Lxsave_clear+512
.Lxsave_header:
.int 0, 0 /* XSTATE_BV */
.int 0, 0 /* XCOMP_BV */
.org .+48 /* reserved bits */
.data
.Laborted:
.byte 0
/* TCS local storage section */
.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags, 0x08 /* initialized by loader */
.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw, 0x0a
.equ tcsls_user_mxcsr, 0x0c
.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp, 0x28
.equ tcsls_user_retip, 0x30
.equ tcsls_user_rbp, 0x38
.equ tcsls_user_r12, 0x40
.equ tcsls_user_r13, 0x48
.equ tcsls_user_r14, 0x50
.equ tcsls_user_r15, 0x58
.equ tcsls_tls_ptr, 0x60
.equ tcsls_tcs_addr, 0x68
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
.ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
.abort
.endif
mov $(1<<tcsls_flag_secondary),%e\reg
and %gs:tcsls_flags,%\reg
.endm
/* We place the ELF entry point in a separate section so it can be removed by
elf2sgxs */
.section .text_no_sgx, "ax"
.Lelf_entry_error_msg:
.ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_entry_error_msg_end:
.global elf_entry
.type elf_entry,function
elf_entry:
/* print error message */
movq $2,%rdi /* write to stderr (fd 2) */
lea .Lelf_entry_error_msg(%rip),%rsi
movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
.Lelf_entry_call:
movq $1,%rax /* write() syscall */
syscall
test %rax,%rax
jle .Lelf_exit /* exit on error */
add %rax,%rsi
sub %rax,%rdx /* all chars written? */
jnz .Lelf_entry_call
.Lelf_exit:
movq $60,%rax /* exit() syscall */
movq $1,%rdi /* exit code 1 */
syscall
ud2 /* should not be reached */
/* end elf_entry */
/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
andq $~0x40400, (%rsp)
popfq
/* check for abort */
bt $0,.Laborted(%rip)
jc .Lreentry_panic
.endm
.text
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
mov %rcx,%gs:tcsls_user_retip
mov %rsp,%gs:tcsls_user_rsp
mov %rbp,%gs:tcsls_user_rbp
mov %r12,%gs:tcsls_user_r12
mov %r13,%gs:tcsls_user_r13
mov %r14,%gs:tcsls_user_r14
mov %r15,%gs:tcsls_user_r15
mov %rbx,%gs:tcsls_tcs_addr
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
mov %rdx, %r10
mov $-1, %rax
mov $-1, %rdx
xrstor .Lxsave_clear(%rip)
lfence
mov %r10, %rdx
/* check if returning from usercall */
mov %gs:tcsls_last_rsp,%r11
test %r11,%r11
jnz .Lusercall_ret
/* setup stack */
mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
/* here. This is fixed below under "adjust stack". */
/* check for thread init */
bts $tcsls_flag_init_once,%gs:tcsls_flags
jc .Lskip_init
/* adjust stack */
lea IMAGE_BASE(%rip),%rax
add %rax,%rsp
mov %rsp,%gs:tcsls_tos
entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
mov %rdi,%rbx
mov %rsi,%r12
mov %rdx,%r13
mov %r8,%r14
mov %r9,%r15
load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
call tcs_init
/* reload caller-saved registers */
mov %rbx,%rdi
mov %r12,%rsi
mov %r13,%rdx
mov %r14,%r8
mov %r15,%r9
jmp .Lafter_init
.Lskip_init:
entry_sanitize_final
.Lafter_init:
/* call into main entry point */
load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
mov %rax,%rsi /* RSI = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
xor %rdi,%rdi /* RDI = normal exit */
.Lexit:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set later */
/* RCX overwritten by ENCLU */
/* RDX contains return value */
/* RSP set later */
/* RBP set later */
/* RDI contains exit mode */
/* RSI contains return value */
xor %r8,%r8
xor %r9,%r9
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
mov %rdx, %rcx /* save RDX */
mov $-1, %rax
mov %rax, %rdx
xrstor .Lxsave_clear(%rip)
mov %rcx, %rdx /* restore RDX */
/* clear flags */
pushq $0
popfq
/* restore user registers */
mov %gs:tcsls_user_r12,%r12
mov %gs:tcsls_user_r13,%r13
mov %gs:tcsls_user_r14,%r14
mov %gs:tcsls_user_r15,%r15
mov %gs:tcsls_user_retip,%rbx
mov %gs:tcsls_user_rsp,%rsp
mov %gs:tcsls_user_rbp,%rbp
fldcw %gs:tcsls_user_fcw
ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
mov $0x4,%eax /* EEXIT */
enclu
/* end sgx_entry */
.Lreentry_panic:
orq $8,%rsp
jmp abort_reentry
/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
.global usercall
usercall:
test %rcx,%rcx /* check `abort` function argument */
jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
testb $0xff,DEBUG(%rip)
jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rbx
sub $8, %rsp
fstcw 4(%rsp)
stmxcsr (%rsp)
movq %rsp,%gs:tcsls_last_rsp
.Lusercall_noreturn:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set by sgx_exit */
/* RCX overwritten by ENCLU */
/* RDX contains parameter */
/* RSP set by sgx_exit */
/* RBP set by sgx_exit */
/* RDI contains parameter */
/* RSI contains parameter */
/* R8 contains parameter */
/* R9 contains parameter */
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
/* extended registers/flags cleared by sgx_exit */
/* exit */
jmp .Lsgx_exit
.Lusercall_ret:
movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
ldmxcsr (%rsp)
fldcw 4(%rsp)
add $8, %rsp
entry_sanitize_final
pop %rbx
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
/* return */
mov %rsi,%rax /* RAX = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
pop %r11
lfence
jmp *%r11
/*
The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;
// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);
// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/
.global get_tcs_addr
get_tcs_addr:
mov %gs:tcsls_tcs_addr,%rax
pop %r11
lfence
jmp *%r11
.global get_tls_ptr
get_tls_ptr:
mov %gs:tcsls_tls_ptr,%rax
pop %r11
lfence
jmp *%r11
.global set_tls_ptr
set_tls_ptr:
mov %rdi,%gs:tcsls_tls_ptr
pop %r11
lfence
jmp *%r11
.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
xor %rax,%rax
xchg %gs:tcsls_debug_panic_buf_ptr,%rax
pop %r11
lfence
jmp *%r11
|
xiaomo-xty/xux-core
| 4,416
|
os/src/trap/trap.S
|
.altmacro
# sp[n] = reg.x<n>
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
# reg.x<n> = sp[n]
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
# trampoline code symbol
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_kernel
.globl __restore_kernel
.align 2
__alltraps:
# (sp)<->(sscratch)
# Switch sp to the trap context page
csrrw sp, sscratch, sp
# ======================STEP [1]=================================
# | Save general registers , except sp, and x0 |
# |_____________________________________________________________|
# Skip x0 = 0 (x0 is always zero and doesn't need to be saved)
# Save x1 to x31 (general-purpose registers) into the TrapContext
sd x1, 1*8(sp)
# Skip x2 (sp), as it is saved in sscratch
sd x3, 3*8(sp)
# Save x4 (tp) as well; the kernel tp is reloaded from the TrapContext below
sd x4, 4*8(sp)
# Save general-purpose registers x5 to x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# ======================STEP [2]=================================
# | Save special registers |
# |_____________________________________________________________|
# Save the supervisor status register (sstatus) to the stack
csrr t0, sstatus
# Save the supervisor exception program counter (sepc) to the stack
csrr t1, sepc
# Save sstatus, sepc, and sscratch into the TrapContext
sd t0, 32*8(sp) # Save sstatus
sd t1, 33*8(sp) # Save sepc (return address)
# Save sscratch (user stack pointer) to the stack
csrr t2, sscratch
sd t2, 2*8(sp) # Save sscratch (user stack pointer)
# load kernel_satp into t0
ld t0, 34*8(sp)
# restore kernel tp
ld tp, 36*8(sp)
# load trap_handler into t1
ld t1, 37*8(sp)
# load kernel_sp into sp
# switch stack
ld sp, 35*8(sp)
# ======================STEP [3]=================================
# | Calling trap_handler |
# |_____________________________________________________________|
# (cx: &mut TrapContext)
# switch to kernel page table
csrw satp, t0
sfence.vma
# call trap_handler
jr t1
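# For reference, a minimal sketch of the Rust-side TrapContext layout that the
# 32*8..37*8 offsets above assume. This is an assumption reconstructed from the
# loads and stores in this file, not copied from the repo's Rust sources; field
# types are simplified to usize for illustration.
#
#     #[repr(C)]
#     pub struct TrapContext {
#         pub x: [usize; 32],      // x0..x31 at offsets 0*8..31*8
#         pub sstatus: usize,      // 32*8
#         pub sepc: usize,         // 33*8
#         pub kernel_satp: usize,  // 34*8
#         pub kernel_sp: usize,    // 35*8
#         pub kernel_tp: usize,    // 36*8
#         pub trap_handler: usize, // 37*8
#     }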
# ## Trap Return Point (`__restore`)
#
# ### Execution Flow
# 1. **User Context Setup**:
# - Switches to the user page table (`satp` CSR).
# - Restores user stack pointer from `sscratch`.
#
# 2. **Register Restore**:
# - Reloads `sstatus` (CPU state) and `sepc` (return address).
# - Restores general-purpose registers (x1-x31).
#
# 3. **User Space Resume**:
# - Executes `sret` to return to user code at `sepc`.
#
# ### Usage
# - Called after `trap_handler` completes to resume user execution.
# - `a0`: Pointer to `TrapContext` in user space.
# - `a1`: User space page table token.
# fn __restore(ctx_addr: usize);
# - case1: start running app by __restore
# - case2: back to U after handling trap
# ctx_addr: usize
# __restore(KERNEL_STACK.push_context(...))
__restore:
# a0: *TrapContext in user space(Constant);
# a1: user space token.
# switch to user pagetable
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# Now sp points to TrapContext in user space
# start restoring based on it
ld t0, 32*8(sp) # load ctx.sstatus to t0
ld t1, 33*8(sp) # load ctx.sepc to t1
csrw sstatus, t0 # Restore sstatus
csrw sepc, t1 # Restore sepc (return address)
# Restore the general-purpose registers
# except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# switch to user stack
ld sp, 2*8(sp)
sret
.align 2
__alltraps_kernel:
# allocate 34*8 for TrapContext
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
# kernel trap handler
csrr t2, sscratch
jalr t2
__restore_kernel:
# load sstatus
ld t0, 32*8(sp)
# load sepc
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp,34*8
sret
|
xiaomo-xty/xux-core
| 2,602
|
os/src/task/switch.S
|
.altmacro
# (n+2)*8(a0): ctx.s[n]
# SAVE_SN n : ctx.s[n] = reg.s<n>
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
# (n+2)*8(a1): ctx.s[n]
# LOAD_SN n : reg.s<n> = ctx.s[n]
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
# TaskContext Layout in Memory:
#
# ┌───────────────────────────────────────┐
# │ return address │ <- offset 0 (ra)
# │ (e.g., __restore in __switch) │
# ├───────────────────────────────────────┤
# │ stack pointer │ <- offset 8 (sp)
# │ (kernel stack pointer of app) │
# ├───────────────────────────────────────┤ <- offset 16 (s[0..11])
# │ ┌─────────────────────┐ │ (callee saved registers: s0..s11)
# │ │ saved register s0 │ <- offset 16 │
# │ ├─────────────────────┤ │
# │ │ saved register s1 │ <- offset 24 │
# │ ├─────────────────────┤ │
# │ │ ... │ │
# │ ├─────────────────────┤ │
# │ │ saved register s11 │ <- offset 104 │
# │ └─────────────────────┘ │
# └───────────────────────────────────────┘
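# A presumed Rust-side definition consistent with the offsets in the diagram
# above (an assumption, not taken from the repo's sources):
#
#     #[repr(C)]
#     pub struct TaskContext {
#         ra: usize,       // offset 0
#         sp: usize,       // offset 8
#         s: [usize; 12],  // element offsets 16, 24, ..., 104 (s0..s11)
#     }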
# __switch
#(
# current_task_cx_ptr: *mut TaskContext,
# |
# └──>[a0]
#
# next_task_cx_ptr : *const TaskContext
# |
# └──>[a1]
# )
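# A presumed Rust-side declaration and call site, shown only to illustrate how
# a0/a1 map onto the two pointer arguments (the extern block is an assumption):
#
#     extern "C" {
#         fn __switch(current_task_cx_ptr: *mut TaskContext,
#                     next_task_cx_ptr: *const TaskContext);
#     }
#     // later, e.g.: unsafe { __switch(current_task_cx_ptr, next_task_cx_ptr) }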
__switch:
# ======================STEP [1]=================================
# | Save [ra] and [s0~s11] to current_task_cx_ptr |
# |_____________________________________________________________|
# current_task_cx_ptr.sp = reg.sp
sd sp, 8(a0)
# current_task_cx_ptr.ra = reg.ra
sd ra, 0(a0)
# reg.s<0..=11>
# |
# v
# current_task_cx_ptr.s[0..=11]
.set n, 0
.rept 12
SAVE_SN %n
.set n, n+1
.endr
# ======================STEP [2]=================================
# | Restore [ra] and [s0~s11] from next_task_cx_ptr |
# |_____________________________________________________________|
# ra = next_task_cx_ptr.ra
# in general, ra points to <trap.S __restore>, so execution resumes there to restore the trap context
ld ra, 0(a1)
# next_task_cx_ptr.s[0..=11]
# |
# v
# reg.s<0..=11>
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# sp = next_task_cx_ptr.sp
ld sp, 8(a1)
ret
|
Xiaozxiaobai/lab-lazyalloc
| 2,420
|
src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/lab-lazyalloc
| 3,489
|
src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: code size here should not be larger than a page,
# and kernel.ld will align the page for trampsec section
#
# diff: the userret and uservec regions are swapped, because otherwise
# Rust cannot call userret directly; the Rust code would have to add an
# offset to the function pointer, which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
Xiaozxiaobai/lab-pagetable
| 2,420
|
src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/lab-pagetable
| 3,489
|
src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: code size here should not be larger than a page,
# and kernel.ld will align the page for trampsec section
#
# diff: the userret and uservec regions are swapped, because otherwise
# Rust cannot call userret directly; the Rust code would have to add an
# offset to the function pointer, which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
Xiaozxiaobai/lab-schedule
| 2,420
|
src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/lab-schedule
| 3,489
|
src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: code size here should not be larger than a page,
# and kernel.ld will align the page for trampsec section
#
# diff: the userret and uservec regions are swapped, because otherwise
# Rust cannot call userret directly; the Rust code would have to add an
# offset to the function pointer, which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
Xiaozxiaobai/lab-syscall
| 2,420
|
src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/lab-syscall
| 3,489
|
src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: code size here should not be larger than a page,
# and kernel.ld will align the page for trampsec section
#
# diff: the userret and uservec regions are swapped, because otherwise
# Rust cannot call userret directly; the Rust code would have to add an
# offset to the function pointer, which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
Xiaozxiaobai/lab-test
| 2,420
|
src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/lab-test
| 3,489
|
src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: code size here should not be larger than a page,
# and kernel.ld will align the page for trampsec section
#
# diff: the userret and uservec regions are swapped, because otherwise
# Rust cannot call userret directly; the Rust code would have to add an
# offset to the function pointer, which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
Xiaozxiaobai/lab-util
| 2,420
|
kernel/src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/lab-util
| 3,489
|
kernel/src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: code size here should not be larger than a page,
# and kernel.ld will align the page for trampsec section
#
# diff: the userret and uservec regions are swapped, because otherwise
# Rust cannot call userret directly; the Rust code would have to add an
# offset to the function pointer, which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
Xiaozxiaobai/os-lesson
| 2,420
|
src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/os-lesson
| 3,489
|
src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: code size here should not be larger than a page,
# and kernel.ld will align the page for trampsec section
#
# diff: the userret and uservec regions are swapped, because otherwise
# Rust cannot call userret directly; the Rust code would have to add an
# offset to the function pointer, which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
Xiaozxiaobai/lab-trap
| 2,420
|
src/asm/kernelvec.S
|
# from xv6-riscv:
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.section .text
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the trap handler in trap.rs
call kerneltrap
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret
# from xv6-riscv:
# machine-mode timer interrupt.
#
.section .text
.globl timervec
.align 4
timervec:
# start.rs has set up the memory that mscratch points to:
# scratch[0,8,16] : register save area.
# scratch[32] : address of CLINT's MTIMECMP register.
# scratch[40] : desired interval between interrupts.
csrrw a0, mscratch, a0
sd a1, 0(a0)
sd a2, 8(a0)
sd a3, 16(a0)
# schedule the next timer interrupt
# by adding interval to mtimecmp.
ld a1, 32(a0) # CLINT_MTIMECMP(hart)
ld a2, 40(a0) # interval
ld a3, 0(a1)
add a3, a3, a2
sd a3, 0(a1)
# raise a supervisor software interrupt.
li a1, 2
csrw sip, a1
ld a3, 16(a0)
ld a2, 8(a0)
ld a1, 0(a0)
csrrw a0, mscratch, a0
mret
|
Xiaozxiaobai/lab-trap
| 3,489
|
src/asm/trampoline.S
|
# from xv6-riscv:
# code used to switch context between user and kernel space
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# note: the code here must not be larger than a page,
# and kernel.ld page-aligns the trampsec section.
#
# diff from xv6: the userret and uservec regions are swapped,
# because Rust could not call userret directly otherwise; the Rust
# code would have to add an offset to the function pointer,
# which is not allowed.
.section trampsec
.globl trampoline
trampoline:
.globl uservec
uservec:
# user_trap_ret() sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->tf is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->tf->a0
csrr t0, sscratch
sd t0, 112(a0)
# save the user program counter
csrr t0, sepc
sd t0, 24(a0)
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# load the address of user_trap(), p->tf->kernel_trap
ld t0, 16(a0)
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
# a0 is no longer valid, since the kernel page
# table does not specially map p->tf.
# jump to user_trap(), which does not return
jr t0
.align 4
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# user_trap_ret() set up sstatus and sepc.
sret
|
xingmin1/OSKernel2024-46
| 2,038
|
arceos/modules/axhal/linker.lds.S
|
OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
.rodata : ALIGN(4K) {
_srodata = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
. = ALIGN(4K);
_erodata = .;
}
.init_array : ALIGN(4K) {
_sinit_array = .;
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
. = ALIGN(4K);
_einit_array = .;
}
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %SMP%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
}
INSERT AFTER .tbss;
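/* The second SECTIONS command relies on the GNU ld "INSERT AFTER" feature:
   rather than replacing the previous layout, it splices the
   linkme_*/linkm2_* collection sections into the image right after .tbss. */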
|
xingmin1/OSKernel2024-46
| 4,307
|
arceos/modules/axhal/src/platform/x86_pc/multiboot.S
|
# Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
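# Per the Multiboot spec, the checksum is chosen so that
# magic + flags + checksum == 0 (mod 2^32); the -(magic + flags)
# expression above guarantees that.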
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
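# The sequence above is the usual 32-bit to 64-bit bring-up: enable PAE/PGE
# in CR4, point CR3 at a temporary identity-mapped PML4, set LME/NXE in
# IA32_EFER, then enable paging and write protection in CR0 before the far
# jump into a 64-bit code segment.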
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 510
# 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
xingmin1/OSKernel2024-46
| 1,965
|
arceos/modules/axhal/src/platform/x86_pc/ap_start.S
|
# Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
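# pa_ap_start32/pa_ap_gdt/pa_ap_gdt_desc are the physical addresses these
# labels end up at once this blob is copied to {start_page_paddr};
# stack_ptr and entry_ptr name the last two 8-byte slots of that page,
# presumably filled in by the boot CPU before it sends the STARTUP IPI.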
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0)
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr]
mov eax, [entry_ptr]
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
xingmin1/OSKernel2024-46
| 1,839
|
arceos/modules/axhal/src/arch/riscv/trap.S
|
.macro SAVE_REGS, from_user
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 2 // load supervisor gp
LDR t1, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv gp, t0
mv tp, t1
.endif
.endm
.macro RESTORE_REGS, from_user
.if \from_user == 1
LDR t1, sp, 2 // load user gp and tp
LDR t0, sp, 3
STR gp, sp, 2 // save supervisor gp
STR tp, sp, 3 // save supervisor gp and tp
mv gp, t1
mv tp, t0
addi t0, sp, {trapframe_size} // put supervisor sp to scratch
csrw sscratch, t0
.endif
LDR t0, sp, 31
LDR t1, sp, 32
csrw sepc, t0
csrw sstatus, t1
POP_GENERAL_REGS
LDR sp, sp, 1 // load sp from tf.regs.sp
.endm
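// STR/LDR and PUSH_GENERAL_REGS/POP_GENERAL_REGS are register-width
// load/store macros defined elsewhere in the crate; the literal indices
// 1, 2, 3, 31 and 32 are the TrapFrame slot numbers of sp, gp, tp,
// sepc and sstatus, as noted in the comments above.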
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
// sscratch == 0: trap from S mode
// sscratch != 0: trap from U mode
csrrw sp, sscratch, sp // swap sscratch and sp
bnez sp, .Ltrap_entry_u
csrr sp, sscratch // put supervisor sp back
j .Ltrap_entry_s
.Ltrap_entry_s:
SAVE_REGS 0
mv a0, sp
li a1, 0
call riscv_trap_handler
RESTORE_REGS 0
sret
.Ltrap_entry_u:
SAVE_REGS 1
mv a0, sp
li a1, 1
call riscv_trap_handler
RESTORE_REGS 1
sret
|
xingmin1/OSKernel2024-46
| 1,339
|
arceos/modules/axhal/src/arch/x86_64/syscall.S
|
.section .text
.code64
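// Entered via the SYSCALL instruction, so rcx holds the user rip and r11
// the user rflags; the pushes below hand-build an interrupt-style frame
// (user ss slot skipped, user rsp, rflags, rip, then all general-purpose
// registers) for x86_syscall_handler() to consume.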
syscall_entry:
swapgs // switch to kernel gs
mov gs:[offset __PERCPU_USER_RSP_OFFSET], rsp // save user rsp
mov rsp, gs:[offset __PERCPU_TSS + {tss_rsp0_offset}] // switch to kernel stack
sub rsp, 8 // skip user ss
push gs:[offset __PERCPU_USER_RSP_OFFSET] // user rsp
push r11 // rflags
mov [rsp - 2 * 8], rcx // rip
sub rsp, 4 * 8 // skip until general registers
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbx
push rdx
push rcx
push rax
mov rdi, rsp
call x86_syscall_handler
pop rax
pop rcx
pop rdx
pop rbx
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
add rsp, 7 * 8
mov rcx, [rsp - 5 * 8] // rip
mov r11, [rsp - 3 * 8] // rflags
mov rsp, [rsp - 2 * 8] // user rsp
swapgs
sysretq
|
xingmin1/OSKernel2024-46
| 1,505
|
arceos/modules/axhal/src/arch/x86_64/trap.S
|
.equ NUM_INT, 256
.altmacro
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
# error code pushed by CPU
push \i # interrupt vector
jmp .Ltrap_common
.else
push 0 # fill in error code in TrapFrame
push \i # interrupt vector
jmp .Ltrap_common
.endif
.endm
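# Vectors 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC) are the
# exceptions for which the CPU itself pushes an error code; every other
# vector gets a dummy 0 pushed so the TrapFrame layout stays uniform.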
.macro DEF_TABLE_ENTRY, i
.quad .Ltrap_handler_\i
.endm
.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
DEF_HANDLER %i
.set i, i + 1
.endr
.Ltrap_common:
test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space
jz 1f
swapgs
1:
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbx
push rdx
push rcx
push rax
mov rdi, rsp
call x86_trap_handler
pop rax
pop rcx
pop rdx
pop rbx
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space
jz 2f
swapgs
2:
add rsp, 16 # pop vector, error_code
iretq
.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
DEF_TABLE_ENTRY %i
.set i, i + 1
.endr
|
xingmin1/OSKernel2024-46
| 2,616
|
arceos/modules/axhal/src/arch/aarch64/trap.S
|
.macro SAVE_REGS
sub sp, sp, 34 * 8
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
mrs x9, sp_el0
mrs x10, elr_el1
mrs x11, spsr_el1
stp x30, x9, [sp, 30 * 8]
stp x10, x11, [sp, 32 * 8]
# We may have interrupted user space or a guest, or be on an exit-from
# or return-to path for either of those, so sp_el0 cannot be trusted
# here and has to be re-established.
bl {cache_current_task_ptr}
.endm
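// The frame is 34 8-byte slots: x0-x29 stored in pairs, then x30 together
// with the saved sp_el0, then elr_el1 and spsr_el1; the Rust-side TrapFrame
// is presumably laid out to match.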
.macro RESTORE_REGS
ldp x10, x11, [sp, 32 * 8]
ldp x30, x9, [sp, 30 * 8]
msr sp_el0, x9
msr elr_el1, x10
msr spsr_el1, x11
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x10, x11, [sp, 10 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, 34 * 8
.endm
.macro INVALID_EXCP, kind, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \kind
mov x2, \source
bl invalid_exception
b .Lexception_return
.endm
.macro HANDLE_SYNC
.p2align 7
SAVE_REGS
mov x0, sp
bl handle_sync_exception
b .Lexception_return
.endm
.macro HANDLE_IRQ
.p2align 7
SAVE_REGS
mov x0, sp
bl handle_irq_exception
b .Lexception_return
.endm
.section .text
.p2align 11
.global exception_vector_base
exception_vector_base:
// current EL, with SP_EL0
INVALID_EXCP 0 0
INVALID_EXCP 1 0
INVALID_EXCP 2 0
INVALID_EXCP 3 0
// current EL, with SP_ELx
HANDLE_SYNC
HANDLE_IRQ
INVALID_EXCP 2 1
INVALID_EXCP 3 1
// lower EL, aarch64
HANDLE_SYNC
HANDLE_IRQ
INVALID_EXCP 2 2
INVALID_EXCP 3 2
// lower EL, aarch32
INVALID_EXCP 0 3
INVALID_EXCP 1 3
INVALID_EXCP 2 3
INVALID_EXCP 3 3
.Lexception_return:
RESTORE_REGS
eret
|
xingmin1/OSKernel2024-46
| 2,544
|
arceos/tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s
|
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
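// ADR_REL yields the PC-relative (load-time) address of a symbol while
// ADR_ABS yields its absolute (link-time) address; the relocation loop in
// _start below copies the binary from the former to the latter before
// jumping into the relocated Rust code.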
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
xrlexpert/hitsz-computer-architecture-2024
| 1,044
|
lab2/src/lab2/gemm_kernel_opt_loop.S
|
.text;
.p2align 2;
.global gemm_kernel_opt_loop;
.type gemm_kernel_opt_loop, %function;
#define MAT_C %rdi
#define MAT_A %rsi
#define MAT_B %r14
#define DIM_M %rcx
#define DIM_N %r8
#define DIM_K %r9
#define loop_m %r10
#define loop_k %r11
#define loop_n %r12
#define mat_elem_idx %r13
.macro PUSHD // save the original general-purpose register values
push %rax
push %rbx
push %rcx
push %rdx
push %rsi
push %rdi
push %rbp
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
.endm
.macro POPD // restore the original general-purpose register values
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rbp
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rbx
pop %rax
.endm
.macro DO_GEMM
// TODO: performance-optimization task for Exercise 3
.endm
gemm_kernel_opt_loop:
PUSHD
DO_GEMM
POPD
ret
|
xrlexpert/hitsz-computer-architecture-2024
| 2,148
|
lab2/src/lab2/gemm_kernel_baseline.S
|
.text;
.p2align 2;
.global gemm_kernel_baseline;
.type gemm_kernel_baseline, %function;
#define MAT_C %rdi
#define MAT_A %rsi
#define MAT_B %r14
#define DIM_M %rcx
#define DIM_N %r8
#define DIM_K %r9
#define loop_m %r10
#define loop_k %r11
#define loop_n %r12
#define mat_elem_idx %r13
.macro PUSHD // save the original general-purpose register values
push %rax
push %rbx
push %rcx
push %rdx
push %rsi
push %rdi
push %rbp
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
.endm
.macro POPD // restore the original general-purpose register values
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rbp
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rbx
pop %rax
.endm
.macro GEMM_INIT
mov %rdx, MAT_B
xor loop_m, loop_m
xor loop_k, loop_k
xor loop_n, loop_n
.endm
.macro DO_GEMM
DO_LOOP_K:
xor loop_m, loop_m
DO_LOOP_M:
xor loop_n, loop_n
mov loop_m, %rax
mul DIM_K
mov %rax, mat_elem_idx
add loop_k, mat_elem_idx // compute m*K+k
flds (MAT_A, mat_elem_idx, 4) // load A[m][k]
DO_LOOP_N:
mov DIM_N, %rax
mul loop_k
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx // compute k*N+n
flds (MAT_B, mat_elem_idx, 4) // load B[k][n]
fmul %st(1), %st(0) // compute A[m][k] * B[k][n]
mov DIM_N, %rax
mul loop_m
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx // compute m*N+n
flds (MAT_C, mat_elem_idx, 4) // load C[m][n]
faddp %st(1), %st(0) // compute C[m][n] + A[m][k] * B[k][n]
fstps (MAT_C, mat_elem_idx, 4)
add $1, loop_n
cmp DIM_N, loop_n
jl DO_LOOP_N
fstp %st(0) // just pop the A[m][k] element
add $1, loop_m
cmp DIM_M, loop_m
jl DO_LOOP_M
add $1, loop_k
cmp DIM_K, loop_k
jl DO_LOOP_K
.endm
gemm_kernel_baseline:
PUSHD
GEMM_INIT
DO_GEMM
POPD
ret
|
xrlexpert/hitsz-computer-architecture-2024
| 2,298
|
lab2/src/lab2/gemm_kernel_opt_prefetch.S
|
.text;
.p2align 2;
.global gemm_kernel_opt_prefetch;
.type gemm_kernel_opt_prefetch, %function;
#define MAT_C %rdi
#define MAT_A %rsi
#define MAT_B %r14
#define DIM_M %rcx
#define DIM_N %r8
#define DIM_K %r9
#define loop_m %r10
#define loop_k %r11
#define loop_n %r12
#define mat_elem_idx %r13
#define prefetch_elem_idx %r15
.macro PUSHD // save the original general-purpose register values
push %rax
push %rbx
push %rcx
push %rdx
push %rsi
push %rdi
push %rbp
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
.endm
.macro POPD // restore the original general-purpose register values
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rbp
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rbx
pop %rax
.endm
.macro GEMM_INIT
mov %rdx, MAT_B
xor loop_m, loop_m
xor loop_k, loop_k
xor loop_n, loop_n
.endm
.macro DO_GEMM
DO_LOOP_K:
xor loop_m, loop_m
DO_LOOP_M:
xor loop_n, loop_n
mov loop_m, %rax
mul DIM_K
mov %rax, mat_elem_idx
add loop_k, mat_elem_idx // compute m*K+k
flds (MAT_A, mat_elem_idx, 4) // load A[m][k]
add DIM_K, mat_elem_idx
prefetcht0 (MAT_A, mat_elem_idx, 4) // prefetch A[m+1][k]
DO_LOOP_N:
mov DIM_N, %rax
mul loop_k
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx // compute k*N+n
flds (MAT_B, mat_elem_idx, 4) // load B[k][n]
fmul %st(1), %st(0) // compute A[m][k] * B[k][n]
mov DIM_N, %rax
mul loop_m
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx // compute m*N+n
flds (MAT_C, mat_elem_idx, 4) // load C[m][n]
faddp %st(1), %st(0) // compute C[m][n] + A[m][k] * B[k][n]
fstps (MAT_C, mat_elem_idx, 4)
add $1, loop_n
cmp DIM_N, loop_n
jl DO_LOOP_N
fstp %st(0) // just pop the A[m][k] element
add $1, loop_m
cmp DIM_M, loop_m
jl DO_LOOP_M
add $1, loop_k
cmp DIM_K, loop_k
jl DO_LOOP_K
.endm
gemm_kernel_opt_prefetch:
PUSHD
GEMM_INIT
DO_GEMM
POPD
ret
|
xrlexpert/hitsz-computer-architecture-2024
| 2,839
|
lab1/src/lab1/gemm_kernel.S
|
.text;
.p2align 2;
.global gemm_kernel;
.type gemm_kernel, %function;
// The macro definitions below group the code by logical step
#define MAT_C %rdi
#define MAT_A %rsi
#define MAT_B %r14
#define DIM_M %rcx
#define DIM_N %r8
#define DIM_K %r9
#define loop_m %r10
#define loop_k %r11
#define loop_n %r12
#define mat_elem_idx %r13
.macro PUSHD // save the original general-purpose register values
push %rax
push %rbx
push %rcx
push %rdx
push %rsi
push %rdi
push %rbp
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
.endm
.macro POPD // restore the original general-purpose register values
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rbp
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rbx
pop %rax
.endm
.macro GEMM_INIT // initialization
// TODO: store the address of matrix B in the register named by the MAT_B macro
mov %rdx, MAT_B
xor loop_m, loop_m
xor loop_k, loop_k
xor loop_n, loop_n
.endm
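// For orientation, the kij traversal implemented by DO_GEMM below matches
// this C-style loop nest over row-major single-precision matrices:
//   for (k = 0; k < K; k++)
//     for (m = 0; m < M; m++) {
//       float a = A[m*K + k];
//       for (n = 0; n < N; n++)
//         C[m*N + n] += a * B[k*N + n];
//     }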
.macro DO_GEMM // compute the matrix product using kij loop order
DO_LOOP_K: // outermost loop over the K dimension
xor loop_m, loop_m // clear the M-dimension loop counter
DO_LOOP_M: // loop over the M dimension
xor loop_n, loop_n // clear the N-dimension loop counter
// TODO: load A[m][k]
mov loop_m, mat_elem_idx
imul DIM_K, mat_elem_idx
add loop_k, mat_elem_idx
flds (MAT_A, mat_elem_idx, 4) // load A[m][k] into st(0); flds can only load to the top of the x87 stack, pushing the old st(0) down to st(1), and fails if the stack is full
DO_LOOP_N:
// TODO: load B[k][n]
mov loop_k, mat_elem_idx
imul DIM_N, mat_elem_idx
add loop_n, mat_elem_idx
flds (MAT_B, mat_elem_idx, 4) // load B[k][n]
fmul %st(1), %st(0) // compute A[m][k] * B[k][n]
// TODO: load C[m][n]
mov loop_m, mat_elem_idx
imul DIM_N, mat_elem_idx
add loop_n, mat_elem_idx
flds (MAT_C, mat_elem_idx, 4) // load C[m][n]
faddp %st(1), %st(0) // compute C[m][n] + A[m][k] * B[k][n]
fstps (MAT_C, mat_elem_idx, 4) // write back C[m][n]
add $1, loop_n // increment the N-dimension loop counter
cmp DIM_N, loop_n
jl DO_LOOP_N
fstp %st(0) // pop st(0); the A element is no longer needed
add $1, loop_m // increment the M-dimension loop counter
cmp DIM_M, loop_m
jl DO_LOOP_M
add $1, loop_k // increment the K-dimension loop counter
cmp DIM_K, loop_k
jl DO_LOOP_K
.endm
gemm_kernel:
PUSHD
GEMM_INIT
DO_GEMM
POPD
ret
|
xrlexpert/hitsz-computer-architecture-2024
| 1,573
|
lab1/src/lab1/print_integer.S
|
/**
* Print one 64-bit integer to standard output.
* */
.section .bss
// .lcomm num, 8 // storage for the 64-bit integer
.lcomm buffer, 21 // 20 digits + 1 terminating NUL as the output buffer
.section .data
newline: .byte 0xA // newline character
.section .text
.globl _start
_start:
// initialize the number to print
mov $1234567890123456789, %rax
// mov %rax, num(%rip)
// convert the integer to a string
// mov num(%rip), %rax
lea buffer+20(%rip), %rdi // point rdi at the last byte of the output buffer
movb $0, (%rdi) // store '\0' in the last byte to terminate the string
convert_loop: // convert the integer to a string, digit by digit
// mov %rax, %rdx
xor %rdx, %rdx
mov $10, %rcx
div %rcx // rdx = rax % 10, rax = rax / 10
add $'0', %dl // compute the ASCII code; dl is the low 8 bits of rdx
dec %rdi
mov %dl, (%rdi) // write the digit to memory
test %rax, %rax // check whether we are done (lab TODO: add the termination test)
jnz convert_loop
find_start: // conversion finished; skip any leading '0' characters
cmpb $'0', (%rdi)
jne print_string
inc %rdi
jmp find_start
print_string: // start printing the string
// compute the string length
lea buffer+20(%rip), %rax
sub %rdi, %rax // number of bytes stored
mov %rax, %rdx // byte count to output goes in rdx
// syscall number (sys_write) (lab TODO: fix this syscall so the string prints correctly)
mov $1, %rax
// pointer to the string
mov %rdi, %rsi
// file descriptor (stdout)
mov $1, %rdi
// number of bytes to write
mov %rdx, %rdx // already in rdx; this mov is a no-op
// perform the system call
syscall
// print a newline
mov $1, %rax
mov $1, %rdi
lea newline(%rip), %rsi
mov $1, %rdx
syscall
// exit the program
mov $60, %rax // syscall number (sys_exit)
xor %rdi, %rdi // exit status code
syscall
|
xrlexpert/hitsz-computer-architecture-2024
| 6,154
|
lab3/src/lab3/gemm_kernel_opt_avx.S
|
.text;
.p2align 2;
.global gemm_kernel_opt_avx;
.type gemm_kernel_opt_avx, %function;
#define AVX_REG_BYTE_WIDTH 32
#define MAT_C %rdi
#define MAT_A %rsi
#define MAT_B %r13
#define DIM_M %rcx
#define DIM_N %r8
#define DIM_K %r9
#define loop_m %r10
#define loop_k %r11
#define loop_n %r12
#define mat_elem_idx %r14
#define temp_reg %r15
// AVX registers used in the computation
#define mat_c0_0_8 %ymm0
#define mat_c0_8_16 %ymm1
#define mat_c0_16_24 %ymm2
#define mat_c0_24_32 %ymm3
#define mat_c1_0_8 %ymm4
#define mat_c1_8_16 %ymm5
#define mat_c1_16_24 %ymm6
#define mat_c1_24_32 %ymm7
#define mat_a0_0_8 %ymm8
#define mat_a1_0_8 %ymm9
#define mat_b0_0_8 %ymm10
#define mat_b0_8_16 %ymm11
#define mat_b0_16_24 %ymm12
#define mat_b0_24_32 %ymm13
.macro PUSHD // save the original general-purpose register values
push %rax
push %rbx
push %rcx
push %rdx
push %rsi
push %rdi
push %rbp
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
.endm
.macro POPD // restore the original general-purpose register values
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rbp
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rbx
pop %rax
.endm
.macro GEMM_INIT
mov %rdx, MAT_B
.endm
.macro LOAD_MAT_A // load 2 elements from the same column of A each time, i.e. A[m][k] and A[m+1][k]
// load A[m][k]
mov loop_m, %rax
mul DIM_K
mov %rax, temp_reg
add loop_k, temp_reg
// compute the byte address of A[m][k]
mov temp_reg, mat_elem_idx
shl $2, mat_elem_idx // shift left, i.e. multiply by 4
vbroadcastss (MAT_A, mat_elem_idx), mat_a0_0_8 // broadcast A[m][k] to all 8 lanes of the AVX register
// TODO Exercise 3: add the logic to load and broadcast A[m+1][k] --> mat_a1_0_8
mov temp_reg, mat_elem_idx
add DIM_K, mat_elem_idx
shl $2, mat_elem_idx // shift left, i.e. multiply by 4
vbroadcastss (MAT_A, mat_elem_idx), mat_a1_0_8
.endm
.macro LOAD_MAT_B // load 32 elements of one row of B each time, i.e. B[k][n:n+32]
// TODO Exercise 3: add the logic to load B[k][n:n+32] --> mat_b0_0_8, mat_b0_8_16, mat_b0_16_24, mat_b0_24_32
// load B[k][n]
mov loop_k, %rax
mul DIM_N
mov %rax, temp_reg
add loop_n, temp_reg
// compute the byte address of B[k][n]
mov temp_reg, mat_elem_idx
shl $2, mat_elem_idx // shift left, i.e. multiply by 4
vmovups (MAT_B, mat_elem_idx), mat_b0_0_8
add $32, mat_elem_idx
vmovups (MAT_B, mat_elem_idx), mat_b0_8_16
add $32, mat_elem_idx
vmovups (MAT_B, mat_elem_idx), mat_b0_16_24
add $32, mat_elem_idx
vmovups (MAT_B, mat_elem_idx), mat_b0_24_32
.endm
.macro LOAD_MAT_C
mov loop_m, %rax
mul DIM_N
mov %rax, temp_reg
add loop_n, temp_reg
// load the first row of the C tile, i.e. C[m][n:n+32]
mov temp_reg, mat_elem_idx
shl $2, mat_elem_idx // shift left, i.e. multiply by 4
// TODO Exercise 3: add the logic to load C[m][n:n+32] --> mat_c0_0_8, mat_c0_8_16, mat_c0_16_24, mat_c0_24_32
vmovups (MAT_C, mat_elem_idx), mat_c0_0_8
add $32, mat_elem_idx
vmovups (MAT_C, mat_elem_idx), mat_c0_8_16
add $32, mat_elem_idx
vmovups (MAT_C, mat_elem_idx), mat_c0_16_24
add $32, mat_elem_idx
vmovups (MAT_C, mat_elem_idx), mat_c0_24_32
// load the second row of the C tile, i.e. C[m+1][n:n+32]
mov temp_reg, mat_elem_idx
add DIM_N, mat_elem_idx
shl $2, mat_elem_idx // shift left, i.e. multiply by 4
// TODO Exercise 3: add the logic to load C[m+1][n:n+32] --> mat_c1_0_8, mat_c1_8_16, mat_c1_16_24, mat_c1_24_32
vmovups (MAT_C, mat_elem_idx), mat_c1_0_8
add $32, mat_elem_idx
vmovups (MAT_C, mat_elem_idx), mat_c1_8_16
add $32, mat_elem_idx
vmovups (MAT_C, mat_elem_idx), mat_c1_16_24
add $32, mat_elem_idx
vmovups (MAT_C, mat_elem_idx), mat_c1_24_32
.endm
.macro STORE_MAT_C
// store the first row of the C tile
mov loop_m, %rax
mul DIM_N
mov %rax, temp_reg
add loop_n, temp_reg
// store the first row of the C tile, i.e. C[m][n:n+32]
mov temp_reg, mat_elem_idx
shl $2, mat_elem_idx // shift left, i.e. multiply by 4
// TODO Exercise 3: add the logic to store mat_c0_0_8, mat_c0_8_16, mat_c0_16_24, mat_c0_24_32 --> C[m][n:n+32]
vmovups mat_c0_0_8, (MAT_C, mat_elem_idx)
add $32, mat_elem_idx
vmovups mat_c0_8_16, (MAT_C, mat_elem_idx)
add $32, mat_elem_idx
vmovups mat_c0_16_24, (MAT_C, mat_elem_idx)
add $32, mat_elem_idx
vmovups mat_c0_24_32, (MAT_C, mat_elem_idx)
// store the second row of the C tile, i.e. C[m+1][n:n+32]
// TODO Exercise 3: add the logic to store mat_c1_0_8, mat_c1_8_16, mat_c1_16_24, mat_c1_24_32 --> C[m+1][n:n+32]
mov temp_reg, mat_elem_idx
add DIM_N, mat_elem_idx
shl $2, mat_elem_idx // shift left, i.e. multiply by 4
vmovups mat_c1_0_8, (MAT_C, mat_elem_idx)
add $32, mat_elem_idx
vmovups mat_c1_8_16, (MAT_C, mat_elem_idx)
add $32, mat_elem_idx
vmovups mat_c1_16_24, (MAT_C, mat_elem_idx)
add $32, mat_elem_idx
vmovups mat_c1_24_32, (MAT_C, mat_elem_idx)
.endm
.macro DO_COMPUTE // compute C[m:m+2][n:n+32] += A[m:m+2][k] * B[k][n:n+32]
// TODO Exercise 3: add the logic that computes C[m:m+2][n:n+32] += A[m:m+2][k] * B[k][n:n+32]
vfmadd231ps mat_a0_0_8, mat_b0_0_8, mat_c0_0_8
vfmadd231ps mat_a0_0_8, mat_b0_8_16, mat_c0_8_16
vfmadd231ps mat_a0_0_8, mat_b0_16_24, mat_c0_16_24
vfmadd231ps mat_a0_0_8, mat_b0_24_32, mat_c0_24_32
vfmadd231ps mat_a1_0_8, mat_b0_0_8, mat_c1_0_8
vfmadd231ps mat_a1_0_8, mat_b0_8_16, mat_c1_8_16
vfmadd231ps mat_a1_0_8, mat_b0_16_24, mat_c1_16_24
vfmadd231ps mat_a1_0_8, mat_b0_24_32, mat_c1_24_32
.endm
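// Each DO_COMPUTE step updates a 2x32 tile of C: the two broadcast A values
// (rows m and m+1, column k) are FMA'd against the same 32 consecutive
// B[k][n:n+32] floats held in four ymm registers, accumulating into the
// eight C-tile registers loaded by LOAD_MAT_C.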
.macro DO_GEMM
xor loop_n, loop_n
DO_LOOP_N:
xor loop_m, loop_m
DO_LOOP_M:
// load the C tile
LOAD_MAT_C
xor loop_k, loop_k
DO_LOOP_K:
// load the A and B sub-blocks
LOAD_MAT_A
LOAD_MAT_B
DO_COMPUTE
add $1, loop_k // kr=1
cmp DIM_K, loop_k
jl DO_LOOP_K
// store the result
STORE_MAT_C
add $2, loop_m // mr=2
cmp DIM_M, loop_m
jl DO_LOOP_M
add $32, loop_n // nr=32
cmp DIM_N, loop_n
jl DO_LOOP_N
.endm
gemm_kernel_opt_avx:
PUSHD
GEMM_INIT
DO_GEMM
POPD
ret
|
xrlexpert/hitsz-computer-architecture-2024
| 2,753
|
lab3/src/lab3/gemm_kernel_opt_loop_unrolling.S
|
.text;
.p2align 2;
.global gemm_kernel_opt_loop_unrolling;
.type gemm_kernel_opt_loop_unrolling, %function;
#define MAT_C %rdi
#define MAT_A %rsi
#define MAT_B %r14
#define DIM_M %rcx
#define DIM_N %r8
#define DIM_K %r9
#define loop_m %r10
#define loop_k %r11
#define loop_n %r12
#define mat_elem_idx %r13
.macro PUSHD // save the original general-purpose register values
push %rax
push %rbx
push %rcx
push %rdx
push %rsi
push %rdi
push %rbp
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
.endm
.macro POPD // restore the original general-purpose register values
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rbp
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rbx
pop %rax
.endm
.macro GEMM_INIT
mov %rdx, MAT_B
xor loop_m, loop_m
xor loop_k, loop_k
xor loop_n, loop_n
.endm
.macro DO_GEMM
DO_LOOP_K:
xor loop_m, loop_m
DO_LOOP_M:
xor loop_n, loop_n
mov loop_m, %rax
mul DIM_K
mov %rax, mat_elem_idx
add loop_k, mat_elem_idx // compute m*K+k
flds (MAT_A, mat_elem_idx, 4) // load A[m][k]
DO_LOOP_N:
mov DIM_N, %rax
mul loop_k
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx
flds (MAT_B, mat_elem_idx, 4) // load B[k][n] --> st(0)
fmul %st(1), %st(0) // compute A[m][k] * B[k][n] --> st(0)
add $1, mat_elem_idx // advance to B[k][n+1]
flds (MAT_B, mat_elem_idx, 4) // load B[k][n+1] --> st(0)
fmul %st(2), %st(0) // compute A[m][k] * B[k][n+1] --> st(0)
mov DIM_N, %rax
mul loop_m
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx // compute m*N+n
flds (MAT_C, mat_elem_idx, 4) // C[m][n] --> st(1)
add $1, mat_elem_idx
flds (MAT_C, mat_elem_idx, 4) // C[m][n+1] --> st(0)
fadd %st(2), %st(0) // C[m][n+1] + A[m][k] * B[k][n+1]
fstps (MAT_C, mat_elem_idx, 4) // store C[m][n+1] and pop
fadd %st(2), %st(0) // C[m][n] + A[m][k] * B[k][n]
sub $1, mat_elem_idx
fstps (MAT_C, mat_elem_idx, 4) // store C[m][n] and pop
fstp %st(0) // pop A[m][k] * B[k][n+1]
fstp %st(0) // pop A[m][k] * B[k][n]
add $2, loop_n
cmp DIM_N, loop_n
jl DO_LOOP_N
fstp %st(0) // just pop the A[m][k] element
add $1, loop_m
cmp DIM_M, loop_m
jl DO_LOOP_M
add $1, loop_k
cmp DIM_K, loop_k
jl DO_LOOP_K
.endm
gemm_kernel_opt_loop_unrolling:
PUSHD
GEMM_INIT
DO_GEMM
POPD
ret
|
xrlexpert/hitsz-computer-architecture-2024
| 2,148
|
lab3/src/lab3/gemm_kernel_baseline.S
|
.text;
.p2align 2;
.global gemm_kernel_baseline;
.type gemm_kernel_baseline, %function;
#define MAT_C %rdi
#define MAT_A %rsi
#define MAT_B %r14
#define DIM_M %rcx
#define DIM_N %r8
#define DIM_K %r9
#define loop_m %r10
#define loop_k %r11
#define loop_n %r12
#define mat_elem_idx %r13
.macro PUSHD // save the original general-purpose register values
push %rax
push %rbx
push %rcx
push %rdx
push %rsi
push %rdi
push %rbp
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
.endm
.macro POPD // restore the original general-purpose register values
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rbp
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rbx
pop %rax
.endm
.macro GEMM_INIT
mov %rdx, MAT_B
xor loop_m, loop_m
xor loop_k, loop_k
xor loop_n, loop_n
.endm
.macro DO_GEMM
DO_LOOP_K:
xor loop_m, loop_m
DO_LOOP_M:
xor loop_n, loop_n
mov loop_m, %rax
mul DIM_K
mov %rax, mat_elem_idx
add loop_k, mat_elem_idx // compute m*K+k
flds (MAT_A, mat_elem_idx, 4) // load A[m][k]
DO_LOOP_N:
mov DIM_N, %rax
mul loop_k
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx // compute k*N+n
flds (MAT_B, mat_elem_idx, 4) // load B[k][n]
fmul %st(1), %st(0) // compute A[m][k] * B[k][n]
mov DIM_N, %rax
mul loop_m
mov %rax, mat_elem_idx
add loop_n, mat_elem_idx // compute m*N+n
flds (MAT_C, mat_elem_idx, 4) // load C[m][n]
faddp %st(1), %st(0) // compute C[m][n] + A[m][k] * B[k][n]
fstps (MAT_C, mat_elem_idx, 4)
add $1, loop_n
cmp DIM_N, loop_n
jl DO_LOOP_N
fstp %st(0) // just pop the A[m][k] element
add $1, loop_m
cmp DIM_M, loop_m
jl DO_LOOP_M
add $1, loop_k
cmp DIM_K, loop_k
jl DO_LOOP_K
.endm
gemm_kernel_baseline:
PUSHD
GEMM_INIT
DO_GEMM
POPD
ret
|
xrvdg/modmulzoo
| 2,441
|
crates/experiments-lowlevel/asm/smul notes.s
|
; smult notes
.global _main
.align 2
// Data section
.data
hello_str:
.ascii "Hello, World!\n"
hello_len = . - hello_str
#define henk mov x29, sp
.text
_main:
// Setup stack frame
stp x29, x30, [sp, #-16]!
; mov x29, sp
henk
// write(1, hello_str, hello_len)
mov x0, #1 // file descriptor 1 is stdout
adrp x1, hello_str@PAGE
add x1, x1, hello_str@PAGEOFF
mov x2, #hello_len // length of the string
mov x16, #4 // macOS write syscall number
svc #0x80 // invoke syscall
// Return 0
mov x0, #0
ldp x29, x30, [sp], #16
ret
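; The fragments below are scratch notes for multiplying a 4x64-bit value by a
; 64-bit limb: each mul/umulh pair yields a 128-bit partial product and the
; adds/cinc (or adcs) chains propagate the carries between limbs. The
; two-operand "cinc xN, hs" forms are shorthand in these notes; the
; architectural form is cinc xd, xn, cond.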
smult:
; all the register numbers can be reduced by one
; x4 round
mul x9, x0, x4
umulh x10, x0, x4
; written this way, the umulh pairs up with the mul of the next limb
; can't use multiply-accumulate as there is no madds instruction
mul x11, x1, x4
umulh x12, x1, x4
adds x10, x10, x11
; can adds be squeezed in between or does umulh affect the flags? umulh doesn't affect flags but shouldn't make a difference
; x11 is free
cinc x12, hs
mul x13, x2, x4
umulh x14, x2, x4
adds x12, x12, x13
; x13 is free
cinc x14, hs
mul x15, x3, x4
umulh x16, x3, x4
adds x14, x14, x15
; x15 is free
cinc x16, hs
; adcs is an option
smult_adcs:
; all the register numbers can be reduced by one
mul x9, x0, x4
umulh x10, x0, x4
mul x11, x1, x4
umulh x12, x1, x4
adds x10, x10, x11
; can adds be squeezed in between or does umulh affect the flags? umulh doesn't affect flags but shouldn't make a difference
; x11 is free
mul x13, x2, x4
umulh x14, x2, x4
adcs x12, x12, x13
; x13 is free
mul x15, x3, x4
umulh x16, x3, x4
adcs x14, x14, x15
; x15 is free
cinc x16, hs
smult_adcs_less_reg:
; all the register numbers can be reduced by one
mul x5, x0, x4
umulh x6, x0, x4
; x0 free
mul x10, x1, x4
umulh x7, x1, x4
adds x6, x6, x10
; can adds be squeezed in between or does umulh affect the flags? umulh doesn't affect flags but shouldn't make a difference
; x1 + x10 is free
mul x10, x2, x4
umulh x8, x2, x4
adcs x7, x7, x10
; x2 + x10 is free
mul x10, x3, x4
umulh x9, x3, x4
adcs x8, x8, x10
cinc x9, hs
; x3 + x10 is free
; adcs is an option
; movs that you will run into, but those should be relatively cheap
; call convention to hang on to
|
xrvdg/modmulzoo
| 5,870
|
crates/modmul-asm/asm/single_step.s
|
//in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3],
//in("x4") b[0], in("x5") b[1], in("x6") b[2], in("x7") b[3],
//lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3],
//lateout("x4") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _,
//lateout("lr") _
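// The mov/movk runs below materialize 64-bit constants 16 bits at a time;
// judging by the crate name and the final conditional subtract/csel, they
// are modulus limbs and related reduction constants of a Montgomery-style
// modular multiplication (an inference, not stated in this file).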
.global _single_step
.align 4
.text
_single_step:
mul x8, x0, x4
umulh x9, x0, x4
mul x10, x1, x4
umulh x11, x1, x4
adds x9, x10, x9
cinc x10, x11, hs
mul x11, x2, x4
umulh x12, x2, x4
adds x10, x11, x10
cinc x11, x12, hs
mul x12, x3, x4
umulh x4, x3, x4
adds x11, x12, x11
cinc x4, x4, hs
mul x12, x0, x5
umulh x13, x0, x5
adds x9, x12, x9
cinc x12, x13, hs
mul x13, x1, x5
umulh x14, x1, x5
adds x12, x13, x12
cinc x13, x14, hs
adds x10, x12, x10
cinc x12, x13, hs
mul x13, x2, x5
umulh x14, x2, x5
adds x12, x13, x12
cinc x13, x14, hs
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x3, x5
umulh x5, x3, x5
adds x12, x13, x12
cinc x5, x5, hs
adds x4, x12, x4
cinc x5, x5, hs
mul x12, x0, x6
umulh x13, x0, x6
adds x10, x12, x10
cinc x12, x13, hs
mul x13, x1, x6
umulh x14, x1, x6
adds x12, x13, x12
cinc x13, x14, hs
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x2, x6
umulh x14, x2, x6
adds x12, x13, x12
cinc x13, x14, hs
adds x4, x12, x4
cinc x12, x13, hs
mul x13, x3, x6
umulh x6, x3, x6
adds x12, x13, x12
cinc x6, x6, hs
adds x5, x12, x5
cinc x6, x6, hs
mul x12, x0, x7
umulh x0, x0, x7
adds x11, x12, x11
cinc x0, x0, hs
mul x12, x1, x7
umulh x1, x1, x7
adds x0, x12, x0
cinc x1, x1, hs
adds x0, x0, x4
cinc x1, x1, hs
mul x4, x2, x7
umulh x2, x2, x7
adds x1, x4, x1
cinc x2, x2, hs
adds x1, x1, x5
cinc x2, x2, hs
mul x4, x3, x7
umulh x3, x3, x7
adds x2, x4, x2
cinc x3, x3, hs
adds x2, x2, x6
cinc x3, x3, hs
mov x4, #48718
movk x4, #4732, lsl 16
movk x4, #45078, lsl 32
movk x4, #39852, lsl 48
mov x5, #16676
movk x5, #12692, lsl 16
movk x5, #20986, lsl 32
movk x5, #2848, lsl 48
mov x6, #51052
movk x6, #24721, lsl 16
movk x6, #61092, lsl 32
movk x6, #45156, lsl 48
mov x7, #3197
movk x7, #18936, lsl 16
movk x7, #10922, lsl 32
movk x7, #11014, lsl 48
mul x12, x4, x8
umulh x4, x4, x8
adds x11, x12, x11
cinc x4, x4, hs
mul x12, x5, x8
umulh x5, x5, x8
adds x4, x12, x4
cinc x5, x5, hs
adds x0, x4, x0
cinc x4, x5, hs
mul x5, x6, x8
umulh x6, x6, x8
adds x4, x5, x4
cinc x5, x6, hs
adds x1, x4, x1
cinc x4, x5, hs
mul x5, x7, x8
umulh x6, x7, x8
adds x4, x5, x4
cinc x5, x6, hs
adds x2, x4, x2
cinc x4, x5, hs
add x3, x3, x4
mov x4, #56431
movk x4, #30457, lsl 16
movk x4, #30012, lsl 32
movk x4, #6382, lsl 48
mov x5, #59151
movk x5, #41769, lsl 16
movk x5, #32276, lsl 32
movk x5, #21677, lsl 48
mov x6, #34015
movk x6, #20342, lsl 16
movk x6, #13935, lsl 32
movk x6, #11030, lsl 48
mov x7, #13689
movk x7, #8159, lsl 16
movk x7, #215, lsl 32
movk x7, #4913, lsl 48
mul x8, x4, x9
umulh x4, x4, x9
adds x8, x8, x11
cinc x4, x4, hs
mul x11, x5, x9
umulh x5, x5, x9
adds x4, x11, x4
cinc x5, x5, hs
adds x0, x4, x0
cinc x4, x5, hs
mul x5, x6, x9
umulh x6, x6, x9
adds x4, x5, x4
cinc x5, x6, hs
adds x1, x4, x1
cinc x4, x5, hs
mul x5, x7, x9
umulh x6, x7, x9
adds x4, x5, x4
cinc x5, x6, hs
adds x2, x4, x2
cinc x4, x5, hs
add x3, x3, x4
mov x4, #61005
movk x4, #58262, lsl 16
movk x4, #32851, lsl 32
movk x4, #11582, lsl 48
mov x5, #37581
movk x5, #43836, lsl 16
movk x5, #36286, lsl 32
movk x5, #51783, lsl 48
mov x6, #10899
movk x6, #30709, lsl 16
movk x6, #61551, lsl 32
movk x6, #45784, lsl 48
mov x7, #36612
movk x7, #63402, lsl 16
movk x7, #47623, lsl 32
movk x7, #9430, lsl 48
mul x9, x4, x10
umulh x4, x4, x10
adds x8, x9, x8
cinc x4, x4, hs
mul x9, x5, x10
umulh x5, x5, x10
adds x4, x9, x4
cinc x5, x5, hs
adds x0, x4, x0
cinc x4, x5, hs
mul x5, x6, x10
umulh x6, x6, x10
adds x4, x5, x4
cinc x5, x6, hs
adds x1, x4, x1
cinc x4, x5, hs
mul x5, x7, x10
umulh x6, x7, x10
adds x4, x5, x4
cinc x5, x6, hs
adds x2, x4, x2
cinc x4, x5, hs
add x3, x3, x4
mov x4, #65535
movk x4, #61439, lsl 16
movk x4, #62867, lsl 32
movk x4, #49889, lsl 48
mul x4, x4, x8
mov x5, #1
movk x5, #61440, lsl 16
movk x5, #62867, lsl 32
movk x5, #17377, lsl 48
mov x6, #28817
movk x6, #31161, lsl 16
movk x6, #59464, lsl 32
movk x6, #10291, lsl 48
mov x7, #22621
movk x7, #33153, lsl 16
movk x7, #17846, lsl 32
movk x7, #47184, lsl 48
mov x9, #41001
movk x9, #57649, lsl 16
movk x9, #20082, lsl 32
movk x9, #12388, lsl 48
mul x10, x5, x4
umulh x5, x5, x4
cmn x10, x8
cinc x5, x5, hs
mul x8, x6, x4
umulh x6, x6, x4
adds x5, x8, x5
cinc x6, x6, hs
adds x0, x5, x0
cinc x5, x6, hs
mul x6, x7, x4
umulh x7, x7, x4
adds x5, x6, x5
cinc x6, x7, hs
adds x1, x5, x1
cinc x5, x6, hs
mul x6, x9, x4
umulh x4, x9, x4
adds x5, x6, x5
cinc x4, x4, hs
adds x2, x5, x2
cinc x4, x4, hs
add x3, x3, x4
mov x4, #2
movk x4, #57344, lsl 16
movk x4, #60199, lsl 32
movk x4, #34755, lsl 48
mov x5, #57634
movk x5, #62322, lsl 16
movk x5, #53392, lsl 32
movk x5, #20583, lsl 48
mov x6, #45242
movk x6, #770, lsl 16
movk x6, #35693, lsl 32
movk x6, #28832, lsl 48
mov x7, #16467
movk x7, #49763, lsl 16
movk x7, #40165, lsl 32
movk x7, #24776, lsl 48
subs x4, x0, x4
sbcs x5, x1, x5
sbcs x6, x2, x6
sbcs x7, x3, x7
tst x3, #9223372036854775808
csel x0, x4, x0, mi
csel x1, x5, x1, mi
csel x2, x6, x2, mi
csel x3, x7, x3, mi
ret
|
xrvdg/modmulzoo
| 5,836
|
crates/modmul-asm/asm/single_step_split.s
|
//in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3],
//in("x4") b[0], in("x5") b[1], in("x6") b[2], in("x7") b[3],
//lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3],
//lateout("x4") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("x20") _, lateout("x21") _, lateout("x22") _, lateout("x23") _,
//lateout("lr") _
.global _single_step_split
.align 4
.text
_single_step_split:
mul x8, x0, x4
umulh x9, x0, x4
mul x10, x1, x4
umulh x11, x1, x4
adds x9, x10, x9
cinc x10, x11, hs
mul x11, x2, x4
umulh x12, x2, x4
adds x10, x11, x10
cinc x11, x12, hs
mul x12, x3, x4
umulh x4, x3, x4
adds x11, x12, x11
cinc x4, x4, hs
mul x12, x0, x5
umulh x13, x0, x5
adds x9, x12, x9
cinc x12, x13, hs
mul x13, x1, x5
umulh x14, x1, x5
adds x12, x13, x12
cinc x13, x14, hs
adds x10, x12, x10
cinc x12, x13, hs
mul x13, x2, x5
umulh x14, x2, x5
adds x12, x13, x12
cinc x13, x14, hs
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x3, x5
umulh x5, x3, x5
adds x12, x13, x12
cinc x5, x5, hs
adds x4, x12, x4
cinc x5, x5, hs
mul x12, x0, x6
umulh x13, x0, x6
adds x10, x12, x10
cinc x12, x13, hs
mul x13, x1, x6
umulh x14, x1, x6
adds x12, x13, x12
cinc x13, x14, hs
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x2, x6
umulh x14, x2, x6
adds x12, x13, x12
cinc x13, x14, hs
adds x4, x12, x4
cinc x12, x13, hs
mul x13, x3, x6
umulh x6, x3, x6
adds x12, x13, x12
cinc x6, x6, hs
adds x5, x12, x5
cinc x6, x6, hs
mul x12, x0, x7
umulh x0, x0, x7
adds x11, x12, x11
cinc x0, x0, hs
mul x12, x1, x7
umulh x1, x1, x7
adds x0, x12, x0
cinc x1, x1, hs
adds x0, x0, x4
cinc x1, x1, hs
mul x4, x2, x7
umulh x2, x2, x7
adds x1, x4, x1
cinc x2, x2, hs
adds x1, x1, x5
cinc x2, x2, hs
mul x4, x3, x7
umulh x3, x3, x7
adds x2, x4, x2
cinc x3, x3, hs
adds x2, x2, x6
cinc x3, x3, hs
mov x4, #48718
movk x4, #4732, lsl 16
movk x4, #45078, lsl 32
movk x4, #39852, lsl 48
mov x5, #16676
movk x5, #12692, lsl 16
movk x5, #20986, lsl 32
movk x5, #2848, lsl 48
mov x6, #51052
movk x6, #24721, lsl 16
movk x6, #61092, lsl 32
movk x6, #45156, lsl 48
mov x7, #3197
movk x7, #18936, lsl 16
movk x7, #10922, lsl 32
movk x7, #11014, lsl 48
mul x12, x4, x8
umulh x4, x4, x8
mul x13, x5, x8
umulh x5, x5, x8
adds x4, x13, x4
cinc x5, x5, hs
mul x13, x6, x8
umulh x6, x6, x8
adds x5, x13, x5
cinc x6, x6, hs
mul x13, x7, x8
umulh x7, x7, x8
adds x6, x13, x6
cinc x7, x7, hs
mov x8, #56431
movk x8, #30457, lsl 16
movk x8, #30012, lsl 32
movk x8, #6382, lsl 48
mov x13, #59151
movk x13, #41769, lsl 16
movk x13, #32276, lsl 32
movk x13, #21677, lsl 48
mov x14, #34015
movk x14, #20342, lsl 16
movk x14, #13935, lsl 32
movk x14, #11030, lsl 48
mov x15, #13689
movk x15, #8159, lsl 16
movk x15, #215, lsl 32
movk x15, #4913, lsl 48
mul x16, x8, x9
umulh x8, x8, x9
mul x17, x13, x9
umulh x13, x13, x9
adds x8, x17, x8
cinc x13, x13, hs
mul x17, x14, x9
umulh x14, x14, x9
adds x13, x17, x13
cinc x14, x14, hs
mul x17, x15, x9
umulh x9, x15, x9
adds x14, x17, x14
cinc x9, x9, hs
mov x15, #61005
movk x15, #58262, lsl 16
movk x15, #32851, lsl 32
movk x15, #11582, lsl 48
mov x17, #37581
movk x17, #43836, lsl 16
movk x17, #36286, lsl 32
movk x17, #51783, lsl 48
mov x20, #10899
movk x20, #30709, lsl 16
movk x20, #61551, lsl 32
movk x20, #45784, lsl 48
mov x21, #36612
movk x21, #63402, lsl 16
movk x21, #47623, lsl 32
movk x21, #9430, lsl 48
mul x22, x15, x10
umulh x15, x15, x10
mul x23, x17, x10
umulh x17, x17, x10
adds x15, x23, x15
cinc x17, x17, hs
mul x23, x20, x10
umulh x20, x20, x10
adds x17, x23, x17
cinc x20, x20, hs
mul x23, x21, x10
umulh x10, x21, x10
adds x20, x23, x20
cinc x10, x10, hs
adds x12, x12, x16
adcs x4, x4, x8
adcs x5, x5, x13
adcs x6, x6, x14
adc x7, x7, x9
adds x8, x12, x22
adcs x4, x4, x15
adcs x5, x5, x17
adcs x6, x6, x20
adc x7, x7, x10
adds x8, x8, x11
adcs x0, x4, x0
adcs x1, x5, x1
adcs x2, x6, x2
adc x3, x7, x3
mov x4, #65535
movk x4, #61439, lsl 16
movk x4, #62867, lsl 32
movk x4, #49889, lsl 48
mul x4, x4, x8
mov x5, #1
movk x5, #61440, lsl 16
movk x5, #62867, lsl 32
movk x5, #17377, lsl 48
mov x6, #28817
movk x6, #31161, lsl 16
movk x6, #59464, lsl 32
movk x6, #10291, lsl 48
mov x7, #22621
movk x7, #33153, lsl 16
movk x7, #17846, lsl 32
movk x7, #47184, lsl 48
mov x9, #41001
movk x9, #57649, lsl 16
movk x9, #20082, lsl 32
movk x9, #12388, lsl 48
mul x10, x5, x4
umulh x5, x5, x4
mul x11, x6, x4
umulh x6, x6, x4
adds x5, x11, x5
cinc x6, x6, hs
mul x11, x7, x4
umulh x7, x7, x4
adds x6, x11, x6
cinc x7, x7, hs
mul x11, x9, x4
umulh x4, x9, x4
adds x7, x11, x7
cinc x4, x4, hs
cmn x10, x8
adcs x0, x5, x0
adcs x1, x6, x1
adcs x2, x7, x2
adcs x5, x4, x3
adc x5, x4, x3
mov x3, #2
movk x3, #57344, lsl 16
movk x3, #60199, lsl 32
movk x3, #34755, lsl 48
mov x4, #57634
movk x4, #62322, lsl 16
movk x4, #53392, lsl 32
movk x4, #20583, lsl 48
mov x6, #45242
movk x6, #770, lsl 16
movk x6, #35693, lsl 32
movk x6, #28832, lsl 48
mov x7, #16467
movk x7, #49763, lsl 16
movk x7, #40165, lsl 32
movk x7, #24776, lsl 48
subs x3, x0, x3
sbcs x4, x1, x4
sbcs x6, x2, x6
sbcs x7, x5, x7
tst x5, #9223372036854775808
csel x0, x3, x0, mi
csel x1, x4, x1, mi
csel x2, x6, x2, mi
csel x3, x7, x5, mi
ret
|
xrvdg/modmulzoo
| 5,946
|
crates/modmul-asm/asm/single_step_load.s
|
//in("x0") a,
//in("x1") b,
//lateout("x0") a,
//lateout("x1") _, lateout("x2") _, lateout("x3") _, lateout("x4") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _,
//lateout("lr") _
.global _single_step_load
.align 4
.text
_single_step_load:
ldp x2, x3, [x0, #0]
ldp x4, x5, [x0, #16]
ldp x6, x7, [x1, #0]
ldp x1, x8, [x1, #16]
mul x9, x2, x6
umulh x10, x2, x6
mul x11, x3, x6
umulh x12, x3, x6
adds x10, x11, x10
cinc x11, x12, hs
mul x12, x4, x6
umulh x13, x4, x6
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x5, x6
umulh x6, x5, x6
adds x12, x13, x12
cinc x6, x6, hs
mul x13, x2, x7
umulh x14, x2, x7
adds x10, x13, x10
cinc x13, x14, hs
mul x14, x3, x7
umulh x15, x3, x7
adds x13, x14, x13
cinc x14, x15, hs
adds x11, x13, x11
cinc x13, x14, hs
mul x14, x4, x7
umulh x15, x4, x7
adds x13, x14, x13
cinc x14, x15, hs
adds x12, x13, x12
cinc x13, x14, hs
mul x14, x5, x7
umulh x7, x5, x7
adds x13, x14, x13
cinc x7, x7, hs
adds x6, x13, x6
cinc x7, x7, hs
mul x13, x2, x1
umulh x14, x2, x1
adds x11, x13, x11
cinc x13, x14, hs
mul x14, x3, x1
umulh x15, x3, x1
adds x13, x14, x13
cinc x14, x15, hs
adds x12, x13, x12
cinc x13, x14, hs
mul x14, x4, x1
umulh x15, x4, x1
adds x13, x14, x13
cinc x14, x15, hs
adds x6, x13, x6
cinc x13, x14, hs
mul x14, x5, x1
umulh x1, x5, x1
adds x13, x14, x13
cinc x1, x1, hs
adds x7, x13, x7
cinc x1, x1, hs
mul x13, x2, x8
umulh x2, x2, x8
adds x12, x13, x12
cinc x2, x2, hs
mul x13, x3, x8
umulh x3, x3, x8
adds x2, x13, x2
cinc x3, x3, hs
adds x2, x2, x6
cinc x3, x3, hs
mul x6, x4, x8
umulh x4, x4, x8
adds x3, x6, x3
cinc x4, x4, hs
adds x3, x3, x7
cinc x4, x4, hs
mul x6, x5, x8
umulh x5, x5, x8
adds x4, x6, x4
cinc x5, x5, hs
adds x1, x4, x1
cinc x4, x5, hs
mov x5, #48718
movk x5, #4732, lsl 16
movk x5, #45078, lsl 32
movk x5, #39852, lsl 48
mov x6, #16676
movk x6, #12692, lsl 16
movk x6, #20986, lsl 32
movk x6, #2848, lsl 48
mov x7, #51052
movk x7, #24721, lsl 16
movk x7, #61092, lsl 32
movk x7, #45156, lsl 48
mov x8, #3197
movk x8, #18936, lsl 16
movk x8, #10922, lsl 32
movk x8, #11014, lsl 48
mul x13, x5, x9
umulh x5, x5, x9
adds x12, x13, x12
cinc x5, x5, hs
mul x13, x6, x9
umulh x6, x6, x9
adds x5, x13, x5
cinc x6, x6, hs
adds x2, x5, x2
cinc x5, x6, hs
mul x6, x7, x9
umulh x7, x7, x9
adds x5, x6, x5
cinc x6, x7, hs
adds x3, x5, x3
cinc x5, x6, hs
mul x6, x8, x9
umulh x7, x8, x9
adds x5, x6, x5
cinc x6, x7, hs
adds x1, x5, x1
cinc x5, x6, hs
add x4, x4, x5
mov x5, #56431
movk x5, #30457, lsl 16
movk x5, #30012, lsl 32
movk x5, #6382, lsl 48
mov x6, #59151
movk x6, #41769, lsl 16
movk x6, #32276, lsl 32
movk x6, #21677, lsl 48
mov x7, #34015
movk x7, #20342, lsl 16
movk x7, #13935, lsl 32
movk x7, #11030, lsl 48
mov x8, #13689
movk x8, #8159, lsl 16
movk x8, #215, lsl 32
movk x8, #4913, lsl 48
mul x9, x5, x10
umulh x5, x5, x10
adds x9, x9, x12
cinc x5, x5, hs
mul x12, x6, x10
umulh x6, x6, x10
adds x5, x12, x5
cinc x6, x6, hs
adds x2, x5, x2
cinc x5, x6, hs
mul x6, x7, x10
umulh x7, x7, x10
adds x5, x6, x5
cinc x6, x7, hs
adds x3, x5, x3
cinc x5, x6, hs
mul x6, x8, x10
umulh x7, x8, x10
adds x5, x6, x5
cinc x6, x7, hs
adds x1, x5, x1
cinc x5, x6, hs
add x4, x4, x5
mov x5, #61005
movk x5, #58262, lsl 16
movk x5, #32851, lsl 32
movk x5, #11582, lsl 48
mov x6, #37581
movk x6, #43836, lsl 16
movk x6, #36286, lsl 32
movk x6, #51783, lsl 48
mov x7, #10899
movk x7, #30709, lsl 16
movk x7, #61551, lsl 32
movk x7, #45784, lsl 48
mov x8, #36612
movk x8, #63402, lsl 16
movk x8, #47623, lsl 32
movk x8, #9430, lsl 48
mul x10, x5, x11
umulh x5, x5, x11
adds x9, x10, x9
cinc x5, x5, hs
mul x10, x6, x11
umulh x6, x6, x11
adds x5, x10, x5
cinc x6, x6, hs
adds x2, x5, x2
cinc x5, x6, hs
mul x6, x7, x11
umulh x7, x7, x11
adds x5, x6, x5
cinc x6, x7, hs
adds x3, x5, x3
cinc x5, x6, hs
mul x6, x8, x11
umulh x7, x8, x11
adds x5, x6, x5
cinc x6, x7, hs
adds x1, x5, x1
cinc x5, x6, hs
add x4, x4, x5
mov x5, #65535
movk x5, #61439, lsl 16
movk x5, #62867, lsl 32
movk x5, #49889, lsl 48
mul x5, x5, x9
mov x6, #1
movk x6, #61440, lsl 16
movk x6, #62867, lsl 32
movk x6, #17377, lsl 48
mov x7, #28817
movk x7, #31161, lsl 16
movk x7, #59464, lsl 32
movk x7, #10291, lsl 48
mov x8, #22621
movk x8, #33153, lsl 16
movk x8, #17846, lsl 32
movk x8, #47184, lsl 48
mov x10, #41001
movk x10, #57649, lsl 16
movk x10, #20082, lsl 32
movk x10, #12388, lsl 48
mul x11, x6, x5
umulh x6, x6, x5
cmn x11, x9
cinc x6, x6, hs
mul x9, x7, x5
umulh x7, x7, x5
adds x6, x9, x6
cinc x7, x7, hs
adds x2, x6, x2
cinc x6, x7, hs
mul x7, x8, x5
umulh x8, x8, x5
adds x6, x7, x6
cinc x7, x8, hs
adds x3, x6, x3
cinc x6, x7, hs
mul x7, x10, x5
umulh x5, x10, x5
adds x6, x7, x6
cinc x5, x5, hs
adds x1, x6, x1
cinc x5, x5, hs
add x4, x4, x5
mov x5, #2
movk x5, #57344, lsl 16
movk x5, #60199, lsl 32
movk x5, #34755, lsl 48
mov x6, #57634
movk x6, #62322, lsl 16
movk x6, #53392, lsl 32
movk x6, #20583, lsl 48
mov x7, #45242
movk x7, #770, lsl 16
movk x7, #35693, lsl 32
movk x7, #28832, lsl 48
mov x8, #16467
movk x8, #49763, lsl 16
movk x8, #40165, lsl 32
movk x8, #24776, lsl 48
subs x5, x2, x5
sbcs x6, x3, x6
sbcs x7, x1, x7
sbcs x8, x4, x8
tst x4, #9223372036854775808
csel x2, x5, x2, mi
csel x3, x6, x3, mi
csel x1, x7, x1, mi
csel x4, x8, x4, mi
stp x2, x3, [x0, #0]
stp x1, x4, [x0, #16]
ret
xrvdg/modmulzoo | 12,824 | crates/modmul-asm/asm/single_step_simd.s
//in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3],
//in("v4") bv[0], in("v5") bv[1], in("v6") bv[2], in("v7") bv[3],
//lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3],
//lateout("x0") _, lateout("x1") _, lateout("x2") _, lateout("x3") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, lateout("v20") _, lateout("v21") _, lateout("v22") _, lateout("v23") _, lateout("v24") _,
//lateout("lr") _
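// Added explanatory sketch (inferred from the code, not taken from the crate docs):
// this routine works on 52-bit limbs carried in the mantissas of doubles, with the two
// 64-bit lanes of each vector holding two independent multiplications. The overall flow is:
// re-split the 64-bit limbs into 52-bit limbs, form every limb product with the FMA hi/lo trick
//   hi = fma(a, b, C1)        with C1 = 2^104        (bit pattern in v21)
//   lo = fma(a, b, C2 - hi)   with C2 = 2^104 + 2^52 (bit pattern in v22)
// accumulate the raw bit patterns with integer add.2d into pre-biased accumulators
// (v11..v20), then carry-propagate, reduce, and repack into four 64-bit limbs.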
.global _single_step_simd
.align 4
.text
_single_step_simd:
mov x0, #4503599627370495
dup.2d v8, x0
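// (added) Radix conversion: each 256-bit operand is shifted left by 2 and re-split into
// five 52-bit limbs -- the shl/usra pairs assemble each limb from neighbouring 64-bit words
// and the and.16b with v8 (2^52 - 1) masks it down. The a limbs land in v0, v1, v2, v9, v3
// and the b limbs in v4, v5, v6, v10, v7.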
shl.2d v9, v1, #14
shl.2d v10, v2, #26
shl.2d v11, v3, #38
ushr.2d v3, v3, #14
shl.2d v12, v0, #2
usra.2d v9, v0, #50
usra.2d v10, v1, #38
usra.2d v11, v2, #26
and.16b v0, v12, v8
and.16b v1, v9, v8
and.16b v2, v10, v8
and.16b v9, v11, v8
shl.2d v10, v5, #14
shl.2d v11, v6, #26
shl.2d v12, v7, #38
ushr.2d v7, v7, #14
shl.2d v13, v4, #2
usra.2d v10, v4, #50
usra.2d v11, v5, #38
usra.2d v12, v6, #26
and.16b v4, v13, v8
and.16b v5, v10, v8
and.16b v6, v11, v8
and.16b v10, v12, v8
mov x1, #13605374474286268416
dup.2d v11, x1
mov x1, #6440147467139809280
dup.2d v12, x1
mov x1, #3688448094816436224
dup.2d v13, x1
mov x1, #9209861237972664320
dup.2d v14, x1
mov x1, #12218265789056155648
dup.2d v15, x1
mov x1, #17739678932212383744
dup.2d v16, x1
mov x1, #2301339409586323456
dup.2d v17, x1
mov x1, #7822752552742551552
dup.2d v18, x1
mov x1, #5071053180419178496
dup.2d v19, x1
mov x1, #16352570246982270976
dup.2d v20, x1
mov x1, #5075556780046548992
dup.2d v21, x1
mov x1, #1
movk x1, #18032, lsl 48
dup.2d v22, x1
ucvtf.2d v0, v0
ucvtf.2d v1, v1
ucvtf.2d v2, v2
ucvtf.2d v9, v9
ucvtf.2d v3, v3
ucvtf.2d v4, v4
ucvtf.2d v5, v5
ucvtf.2d v6, v6
ucvtf.2d v10, v10
ucvtf.2d v7, v7
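// (added) Each product below follows the same six-instruction pattern: copy v21 (C1),
// fmla to get the high half, fsub from v22 (C2) and fmla again to get the low half, then
// add.2d both bit patterns into the column accumulators for limbs i+j+1 and i+j.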
mov.16b v23, v21
fmla.2d v23, v0, v4
fsub.2d v24, v22, v23
fmla.2d v24, v0, v4
add.2d v13, v13, v23
add.2d v11, v11, v24
mov.16b v23, v21
fmla.2d v23, v0, v5
fsub.2d v24, v22, v23
fmla.2d v24, v0, v5
add.2d v15, v15, v23
add.2d v13, v13, v24
mov.16b v23, v21
fmla.2d v23, v0, v6
fsub.2d v24, v22, v23
fmla.2d v24, v0, v6
add.2d v17, v17, v23
add.2d v15, v15, v24
mov.16b v23, v21
fmla.2d v23, v0, v10
fsub.2d v24, v22, v23
fmla.2d v24, v0, v10
add.2d v19, v19, v23
add.2d v17, v17, v24
mov.16b v23, v21
fmla.2d v23, v0, v7
fsub.2d v24, v22, v23
fmla.2d v24, v0, v7
add.2d v0, v20, v23
add.2d v19, v19, v24
mov.16b v20, v21
fmla.2d v20, v1, v4
fsub.2d v23, v22, v20
fmla.2d v23, v1, v4
add.2d v15, v15, v20
add.2d v13, v13, v23
mov.16b v20, v21
fmla.2d v20, v1, v5
fsub.2d v23, v22, v20
fmla.2d v23, v1, v5
add.2d v17, v17, v20
add.2d v15, v15, v23
mov.16b v20, v21
fmla.2d v20, v1, v6
fsub.2d v23, v22, v20
fmla.2d v23, v1, v6
add.2d v19, v19, v20
add.2d v17, v17, v23
mov.16b v20, v21
fmla.2d v20, v1, v10
fsub.2d v23, v22, v20
fmla.2d v23, v1, v10
add.2d v0, v0, v20
add.2d v19, v19, v23
mov.16b v20, v21
fmla.2d v20, v1, v7
fsub.2d v23, v22, v20
fmla.2d v23, v1, v7
add.2d v1, v18, v20
add.2d v0, v0, v23
mov.16b v18, v21
fmla.2d v18, v2, v4
fsub.2d v20, v22, v18
fmla.2d v20, v2, v4
add.2d v17, v17, v18
add.2d v15, v15, v20
mov.16b v18, v21
fmla.2d v18, v2, v5
fsub.2d v20, v22, v18
fmla.2d v20, v2, v5
add.2d v18, v19, v18
add.2d v17, v17, v20
mov.16b v19, v21
fmla.2d v19, v2, v6
fsub.2d v20, v22, v19
fmla.2d v20, v2, v6
add.2d v0, v0, v19
add.2d v18, v18, v20
mov.16b v19, v21
fmla.2d v19, v2, v10
fsub.2d v20, v22, v19
fmla.2d v20, v2, v10
add.2d v1, v1, v19
add.2d v0, v0, v20
mov.16b v19, v21
fmla.2d v19, v2, v7
fsub.2d v20, v22, v19
fmla.2d v20, v2, v7
add.2d v2, v16, v19
add.2d v1, v1, v20
mov.16b v16, v21
fmla.2d v16, v9, v4
fsub.2d v19, v22, v16
fmla.2d v19, v9, v4
add.2d v16, v18, v16
add.2d v17, v17, v19
mov.16b v18, v21
fmla.2d v18, v9, v5
fsub.2d v19, v22, v18
fmla.2d v19, v9, v5
add.2d v0, v0, v18
add.2d v16, v16, v19
mov.16b v18, v21
fmla.2d v18, v9, v6
fsub.2d v19, v22, v18
fmla.2d v19, v9, v6
add.2d v1, v1, v18
add.2d v0, v0, v19
mov.16b v18, v21
fmla.2d v18, v9, v10
fsub.2d v19, v22, v18
fmla.2d v19, v9, v10
add.2d v2, v2, v18
add.2d v1, v1, v19
mov.16b v18, v21
fmla.2d v18, v9, v7
fsub.2d v19, v22, v18
fmla.2d v19, v9, v7
add.2d v9, v14, v18
add.2d v2, v2, v19
mov.16b v14, v21
fmla.2d v14, v3, v4
fsub.2d v18, v22, v14
fmla.2d v18, v3, v4
add.2d v0, v0, v14
add.2d v4, v16, v18
mov.16b v14, v21
fmla.2d v14, v3, v5
fsub.2d v16, v22, v14
fmla.2d v16, v3, v5
add.2d v1, v1, v14
add.2d v0, v0, v16
mov.16b v5, v21
fmla.2d v5, v3, v6
fsub.2d v14, v22, v5
fmla.2d v14, v3, v6
add.2d v2, v2, v5
add.2d v1, v1, v14
mov.16b v5, v21
fmla.2d v5, v3, v10
fsub.2d v6, v22, v5
fmla.2d v6, v3, v10
add.2d v5, v9, v5
add.2d v2, v2, v6
mov.16b v6, v21
fmla.2d v6, v3, v7
fsub.2d v9, v22, v6
fmla.2d v9, v3, v7
add.2d v3, v12, v6
add.2d v5, v5, v9
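// (added) Carry propagation over the low columns: each usra #52 adds the bits above 52 of
// one column into the next, and the and.16b with the 2^52 - 1 mask in v8 keeps the low
// 52 bits of each column.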
usra.2d v13, v11, #52
usra.2d v15, v13, #52
usra.2d v17, v15, #52
usra.2d v4, v17, #52
and.16b v6, v11, v8
and.16b v7, v13, v8
and.16b v9, v15, v8
and.16b v8, v17, v8
ucvtf.2d v6, v6
mov x1, #37864
movk x1, #1815, lsl 16
movk x1, #28960, lsl 32
movk x1, #17153, lsl 48
dup.2d v10, x1
mov.16b v11, v21
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
add.2d v0, v0, v11
add.2d v4, v4, v12
mov x1, #46128
movk x1, #29964, lsl 16
movk x1, #7587, lsl 32
movk x1, #17161, lsl 48
dup.2d v10, x1
mov.16b v11, v21
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
add.2d v1, v1, v11
add.2d v0, v0, v12
mov x1, #52826
movk x1, #57790, lsl 16
movk x1, #55431, lsl 32
movk x1, #17196, lsl 48
dup.2d v10, x1
mov.16b v11, v21
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
add.2d v2, v2, v11
add.2d v1, v1, v12
mov x1, #31276
movk x1, #21262, lsl 16
movk x1, #2304, lsl 32
movk x1, #17182, lsl 48
dup.2d v10, x1
mov.16b v11, v21
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
add.2d v5, v5, v11
add.2d v2, v2, v12
mov x1, #28672
movk x1, #24515, lsl 16
movk x1, #54929, lsl 32
movk x1, #17064, lsl 48
dup.2d v10, x1
mov.16b v11, v21
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
add.2d v3, v3, v11
add.2d v5, v5, v12
ucvtf.2d v6, v7
mov x1, #44768
movk x1, #51919, lsl 16
movk x1, #6346, lsl 32
movk x1, #17133, lsl 48
dup.2d v7, x1
mov.16b v10, v21
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
add.2d v0, v0, v10
add.2d v4, v4, v11
mov x1, #47492
movk x1, #23630, lsl 16
movk x1, #49985, lsl 32
movk x1, #17168, lsl 48
dup.2d v7, x1
mov.16b v10, v21
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
add.2d v1, v1, v10
add.2d v0, v0, v11
mov x1, #57936
movk x1, #54828, lsl 16
movk x1, #18292, lsl 32
movk x1, #17197, lsl 48
dup.2d v7, x1
mov.16b v10, v21
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
add.2d v2, v2, v10
add.2d v1, v1, v11
mov x1, #17708
movk x1, #43915, lsl 16
movk x1, #64348, lsl 32
movk x1, #17188, lsl 48
dup.2d v7, x1
mov.16b v10, v21
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
add.2d v5, v5, v10
add.2d v2, v2, v11
mov x1, #29184
movk x1, #20789, lsl 16
movk x1, #19197, lsl 32
movk x1, #17083, lsl 48
dup.2d v7, x1
mov.16b v10, v21
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
add.2d v3, v3, v10
add.2d v5, v5, v11
ucvtf.2d v6, v9
mov x1, #58856
movk x1, #14953, lsl 16
movk x1, #15155, lsl 32
movk x1, #17181, lsl 48
dup.2d v7, x1
mov.16b v9, v21
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v0, v0, v9
add.2d v4, v4, v10
mov x1, #35392
movk x1, #12477, lsl 16
movk x1, #56780, lsl 32
movk x1, #17142, lsl 48
dup.2d v7, x1
mov.16b v9, v21
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v1, v1, v9
add.2d v0, v0, v10
mov x1, #9848
movk x1, #54501, lsl 16
movk x1, #31540, lsl 32
movk x1, #17170, lsl 48
dup.2d v7, x1
mov.16b v9, v21
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v2, v2, v9
add.2d v1, v1, v10
mov x1, #9584
movk x1, #63883, lsl 16
movk x1, #18253, lsl 32
movk x1, #17190, lsl 48
dup.2d v7, x1
mov.16b v9, v21
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v5, v5, v9
add.2d v2, v2, v10
mov x1, #51712
movk x1, #16093, lsl 16
movk x1, #30633, lsl 32
movk x1, #17068, lsl 48
dup.2d v7, x1
mov.16b v9, v21
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v3, v3, v9
add.2d v5, v5, v10
ucvtf.2d v6, v8
mov x1, #34724
movk x1, #40393, lsl 16
movk x1, #23752, lsl 32
movk x1, #17184, lsl 48
dup.2d v7, x1
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v0, v0, v8
add.2d v4, v4, v9
mov x1, #25532
movk x1, #31025, lsl 16
movk x1, #10002, lsl 32
movk x1, #17199, lsl 48
dup.2d v7, x1
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v1, v1, v8
add.2d v0, v0, v9
mov x1, #18830
movk x1, #2465, lsl 16
movk x1, #36348, lsl 32
movk x1, #17194, lsl 48
dup.2d v7, x1
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v2, v2, v8
add.2d v1, v1, v9
mov x1, #21566
movk x1, #43708, lsl 16
movk x1, #57685, lsl 32
movk x1, #17185, lsl 48
dup.2d v7, x1
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v5, v5, v8
add.2d v2, v2, v9
mov x1, #3072
movk x1, #8058, lsl 16
movk x1, #46097, lsl 32
movk x1, #17047, lsl 48
dup.2d v7, x1
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v3, v3, v8
add.2d v5, v5, v9
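// Added note (mechanical description; the purpose is inferred): the block below pulls the
// two lanes of v4 out to general registers, multiplies each by a fixed constant, masks to
// 52 bits, re-inserts and converts back to double, then folds in five more constants with
// the same FMA pattern -- presumably the final correction step of the reduction.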
mov x1, #65535
movk x1, #61439, lsl 16
movk x1, #62867, lsl 32
movk x1, #1, lsl 48
umov x2, v4.d[0]
umov x3, v4.d[1]
mul x2, x2, x1
mul x1, x3, x1
and x2, x2, x0
and x0, x1, x0
ins v6.d[0], x2
ins v6.d[1], x0
ucvtf.2d v6, v6
mov x0, #16
movk x0, #22847, lsl 32
movk x0, #17151, lsl 48
dup.2d v7, x0
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v0, v0, v8
add.2d v4, v4, v9
mov x0, #20728
movk x0, #23588, lsl 16
movk x0, #7790, lsl 32
movk x0, #17170, lsl 48
dup.2d v7, x0
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v1, v1, v8
add.2d v0, v0, v9
mov x0, #16000
movk x0, #53891, lsl 16
movk x0, #5509, lsl 32
movk x0, #17144, lsl 48
dup.2d v7, x0
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v2, v2, v8
add.2d v1, v1, v9
mov x0, #46800
movk x0, #2568, lsl 16
movk x0, #1335, lsl 32
movk x0, #17188, lsl 48
dup.2d v7, x0
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v5, v5, v8
add.2d v2, v2, v9
mov x0, #39040
movk x0, #14704, lsl 16
movk x0, #12839, lsl 32
movk x0, #17096, lsl 48
dup.2d v7, x0
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v3, v3, v8
add.2d v5, v5, v9
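// (added) Final fix-up: the 2^47 mask plus cmeq/bic selects, per lane, whether a fixed
// five-limb constant is subtracted (based on bit 47 of the top limb in v3); ssra #52 then
// performs a signed carry propagation, and the ushr/sli pairs repack the five 52-bit limbs
// into the four 64-bit result limbs in v0..v3.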
mov x0, #140737488355328
dup.2d v6, x0
and.16b v6, v3, v6
cmeq.2d v6, v6, #0
mov x0, #2
movk x0, #57344, lsl 16
movk x0, #60199, lsl 32
movk x0, #3, lsl 48
dup.2d v7, x0
bic.16b v7, v7, v6
mov x0, #10364
movk x0, #11794, lsl 16
movk x0, #3895, lsl 32
movk x0, #9, lsl 48
dup.2d v8, x0
bic.16b v8, v8, v6
mov x0, #26576
movk x0, #47696, lsl 16
movk x0, #688, lsl 32
movk x0, #3, lsl 48
dup.2d v9, x0
bic.16b v9, v9, v6
mov x0, #46800
movk x0, #2568, lsl 16
movk x0, #1335, lsl 32
movk x0, #4, lsl 48
dup.2d v10, x0
bic.16b v10, v10, v6
mov x0, #49763
movk x0, #40165, lsl 16
movk x0, #24776, lsl 32
dup.2d v11, x0
bic.16b v6, v11, v6
sub.2d v0, v0, v7
ssra.2d v0, v4, #52
sub.2d v4, v1, v8
ssra.2d v4, v0, #52
sub.2d v7, v2, v9
ssra.2d v7, v4, #52
sub.2d v5, v5, v10
ssra.2d v5, v7, #52
sub.2d v6, v3, v6
ssra.2d v6, v5, #52
ushr.2d v1, v4, #12
ushr.2d v2, v7, #24
ushr.2d v3, v5, #36
sli.2d v0, v4, #52
sli.2d v1, v7, #40
sli.2d v2, v5, #28
sli.2d v3, v6, #16
ret
xrvdg/modmulzoo | 1,525 | crates/modmul-asm/asm/reduce_ct_simd.s
//in("v0") red[0], in("v1") red[1], in("v2") red[2], in("v3") red[3], in("v4") red[4], in("v5") red[5],
//lateout("v0") out[0], lateout("v1") out[1], lateout("v2") out[2], lateout("v3") out[3], lateout("v4") out[4],
//lateout("x0") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _,
//lateout("lr") _
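// Added sketch (inferred from the code): given six 52-bit limbs in v0..v5, test bit 47 of
// the top limb, conditionally subtract a fixed five-limb constant (cmeq + bic build a
// per-lane mask, so the subtraction is branch-free), propagate signed carries with
// ssra #52 (v0's carry feeds v1, and so on), and mask each limb back to 52 bits, leaving
// the five result limbs in v0..v4.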
.global _reduce_ct_simd
.align 4
.text
_reduce_ct_simd:
mov x0, #4503599627370495
dup.2d v6, x0
mov x0, #140737488355328
dup.2d v7, x0
and.16b v7, v5, v7
cmeq.2d v7, v7, #0
mov x0, #2
movk x0, #57344, lsl 16
movk x0, #60199, lsl 32
movk x0, #3, lsl 48
dup.2d v8, x0
bic.16b v8, v8, v7
mov x0, #10364
movk x0, #11794, lsl 16
movk x0, #3895, lsl 32
movk x0, #9, lsl 48
dup.2d v9, x0
bic.16b v9, v9, v7
mov x0, #26576
movk x0, #47696, lsl 16
movk x0, #688, lsl 32
movk x0, #3, lsl 48
dup.2d v10, x0
bic.16b v10, v10, v7
mov x0, #46800
movk x0, #2568, lsl 16
movk x0, #1335, lsl 32
movk x0, #4, lsl 48
dup.2d v11, x0
bic.16b v11, v11, v7
mov x0, #49763
movk x0, #40165, lsl 16
movk x0, #24776, lsl 32
dup.2d v12, x0
bic.16b v7, v12, v7
sub.2d v1, v1, v8
ssra.2d v1, v0, #52
sub.2d v2, v2, v9
ssra.2d v2, v1, #52
sub.2d v3, v3, v10
ssra.2d v3, v2, #52
sub.2d v4, v4, v11
ssra.2d v4, v3, #52
sub.2d v5, v5, v7
ssra.2d v5, v4, #52
and.16b v0, v1, v6
and.16b v1, v2, v6
and.16b v2, v3, v6
and.16b v3, v4, v6
and.16b v4, v5, v6
ret
xrvdg/modmulzoo | 2,070 | crates/modmul-asm/asm/school_method.s
//in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3],
//in("x4") b[0], in("x5") b[1], in("x6") b[2], in("x7") b[3],
//lateout("x8") out[0], lateout("x9") out[1], lateout("x10") out[2], lateout("x11") out[3], lateout("x4") out[4], lateout("x5") out[5], lateout("x6") out[6], lateout("x7") out[7],
//lateout("x0") _, lateout("x1") _, lateout("x2") _, lateout("x3") _, lateout("x12") _, lateout("x13") _, lateout("x14") _,
//lateout("lr") _
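// Added sketch: a plain schoolbook 4x4 -> 8 limb multiply. Each 64x64 product is split as
//   a*b = umulh(a, b) * 2^64 + mul(a, b)
// and the adds/cinc pairs ripple the carries, so column k collects every a[i]*b[j] with
// i + j == k; the eight output limbs end up in x8..x11 and x4..x7 as listed above.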
.global _school_method
.align 4
.text
_school_method:
mul x8, x0, x4
umulh x9, x0, x4
mul x10, x1, x4
umulh x11, x1, x4
adds x9, x10, x9
cinc x10, x11, hs
mul x11, x2, x4
umulh x12, x2, x4
adds x10, x11, x10
cinc x11, x12, hs
mul x12, x3, x4
umulh x4, x3, x4
adds x11, x12, x11
cinc x4, x4, hs
mul x12, x0, x5
umulh x13, x0, x5
adds x9, x12, x9
cinc x12, x13, hs
mul x13, x1, x5
umulh x14, x1, x5
adds x12, x13, x12
cinc x13, x14, hs
adds x10, x12, x10
cinc x12, x13, hs
mul x13, x2, x5
umulh x14, x2, x5
adds x12, x13, x12
cinc x13, x14, hs
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x3, x5
umulh x5, x3, x5
adds x12, x13, x12
cinc x5, x5, hs
adds x4, x12, x4
cinc x5, x5, hs
mul x12, x0, x6
umulh x13, x0, x6
adds x10, x12, x10
cinc x12, x13, hs
mul x13, x1, x6
umulh x14, x1, x6
adds x12, x13, x12
cinc x13, x14, hs
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x2, x6
umulh x14, x2, x6
adds x12, x13, x12
cinc x13, x14, hs
adds x4, x12, x4
cinc x12, x13, hs
mul x13, x3, x6
umulh x6, x3, x6
adds x12, x13, x12
cinc x6, x6, hs
adds x5, x12, x5
cinc x6, x6, hs
mul x12, x0, x7
umulh x13, x0, x7
adds x11, x12, x11
cinc x12, x13, hs
mul x13, x1, x7
umulh x14, x1, x7
adds x12, x13, x12
cinc x13, x14, hs
adds x4, x12, x4
cinc x12, x13, hs
mul x13, x2, x7
umulh x14, x2, x7
adds x12, x13, x12
cinc x13, x14, hs
adds x5, x12, x5
cinc x12, x13, hs
mul x13, x3, x7
umulh x7, x3, x7
adds x12, x13, x12
cinc x7, x7, hs
adds x6, x12, x6
cinc x7, x7, hs
ret
xrvdg/modmulzoo | 18,678 | crates/modmul-asm/asm/single_step_interleaved.s
//in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3],
//in("x4") b[0], in("x5") b[1], in("x6") b[2], in("x7") b[3],
//in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3],
//in("v4") bv[0], in("v5") bv[1], in("v6") bv[2], in("v7") bv[3],
//lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3],
//lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3],
//lateout("x4") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, lateout("v20") _, lateout("v21") _, lateout("v22") _, lateout("v23") _, lateout("v24") _,
//lateout("lr") _
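// Added note (inferred from the name and the body): this interleaves two independent
// multiplications so the scalar and vector pipelines run in parallel -- the x-register
// instructions are the same single-step scalar multiply, the v-register instructions are
// the same 52-bit FMA multiply as single_step_simd, and the two streams only share
// general-purpose registers as scratch for materialising constants and lane moves.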
.global _single_step_interleaved
.align 4
.text
_single_step_interleaved:
mov x8, #4503599627370495
dup.2d v8, x8
mul x9, x0, x4
shl.2d v9, v1, #14
shl.2d v10, v2, #26
shl.2d v11, v3, #38
umulh x10, x0, x4
ushr.2d v3, v3, #14
shl.2d v12, v0, #2
usra.2d v9, v0, #50
mul x11, x1, x4
usra.2d v10, v1, #38
usra.2d v11, v2, #26
umulh x12, x1, x4
and.16b v0, v12, v8
and.16b v1, v9, v8
and.16b v2, v10, v8
adds x10, x11, x10
cinc x11, x12, hs
and.16b v9, v11, v8
shl.2d v10, v5, #14
shl.2d v11, v6, #26
mul x12, x2, x4
shl.2d v12, v7, #38
ushr.2d v7, v7, #14
umulh x13, x2, x4
shl.2d v13, v4, #2
usra.2d v10, v4, #50
usra.2d v11, v5, #38
adds x11, x12, x11
cinc x12, x13, hs
usra.2d v12, v6, #26
and.16b v4, v13, v8
and.16b v5, v10, v8
mul x13, x3, x4
and.16b v6, v11, v8
and.16b v10, v12, v8
mov x14, #13605374474286268416
umulh x4, x3, x4
dup.2d v11, x14
mov x14, #6440147467139809280
adds x12, x13, x12
cinc x4, x4, hs
dup.2d v12, x14
mov x13, #3688448094816436224
dup.2d v13, x13
mul x13, x0, x5
mov x14, #9209861237972664320
dup.2d v14, x14
mov x14, #12218265789056155648
umulh x15, x0, x5
dup.2d v15, x14
mov x14, #17739678932212383744
adds x10, x13, x10
cinc x13, x15, hs
dup.2d v16, x14
mov x14, #2301339409586323456
dup.2d v17, x14
mul x14, x1, x5
mov x15, #7822752552742551552
dup.2d v18, x15
mov x15, #5071053180419178496
umulh x16, x1, x5
dup.2d v19, x15
mov x15, #16352570246982270976
adds x13, x14, x13
cinc x14, x16, hs
dup.2d v20, x15
mov x15, #5075556780046548992
dup.2d v21, x15
adds x11, x13, x11
cinc x13, x14, hs
mov x14, #1
movk x14, #18032, lsl 48
dup.2d v22, x14
mul x14, x2, x5
ucvtf.2d v0, v0
ucvtf.2d v1, v1
ucvtf.2d v2, v2
umulh x15, x2, x5
ucvtf.2d v9, v9
ucvtf.2d v3, v3
adds x13, x14, x13
cinc x14, x15, hs
ucvtf.2d v4, v4
ucvtf.2d v5, v5
ucvtf.2d v6, v6
adds x12, x13, x12
cinc x13, x14, hs
ucvtf.2d v10, v10
ucvtf.2d v7, v7
mov.16b v23, v21
mul x14, x3, x5
fmla.2d v23, v0, v4
fsub.2d v24, v22, v23
umulh x5, x3, x5
fmla.2d v24, v0, v4
add.2d v13, v13, v23
add.2d v11, v11, v24
adds x13, x14, x13
cinc x5, x5, hs
mov.16b v23, v21
fmla.2d v23, v0, v5
fsub.2d v24, v22, v23
adds x4, x13, x4
cinc x5, x5, hs
fmla.2d v24, v0, v5
add.2d v15, v15, v23
mul x13, x0, x6
add.2d v13, v13, v24
mov.16b v23, v21
fmla.2d v23, v0, v6
umulh x14, x0, x6
fsub.2d v24, v22, v23
fmla.2d v24, v0, v6
add.2d v17, v17, v23
adds x11, x13, x11
cinc x13, x14, hs
add.2d v15, v15, v24
mov.16b v23, v21
fmla.2d v23, v0, v10
mul x14, x1, x6
fsub.2d v24, v22, v23
fmla.2d v24, v0, v10
umulh x15, x1, x6
add.2d v19, v19, v23
add.2d v17, v17, v24
mov.16b v23, v21
adds x13, x14, x13
cinc x14, x15, hs
fmla.2d v23, v0, v7
fsub.2d v24, v22, v23
fmla.2d v24, v0, v7
adds x12, x13, x12
cinc x13, x14, hs
add.2d v0, v20, v23
add.2d v19, v19, v24
mul x14, x2, x6
mov.16b v20, v21
fmla.2d v20, v1, v4
fsub.2d v23, v22, v20
umulh x15, x2, x6
fmla.2d v23, v1, v4
add.2d v15, v15, v20
add.2d v13, v13, v23
adds x13, x14, x13
cinc x14, x15, hs
mov.16b v20, v21
fmla.2d v20, v1, v5
adds x4, x13, x4
cinc x13, x14, hs
fsub.2d v23, v22, v20
fmla.2d v23, v1, v5
add.2d v17, v17, v20
mul x14, x3, x6
add.2d v15, v15, v23
mov.16b v20, v21
fmla.2d v20, v1, v6
umulh x6, x3, x6
fsub.2d v23, v22, v20
fmla.2d v23, v1, v6
add.2d v19, v19, v20
adds x13, x14, x13
cinc x6, x6, hs
add.2d v17, v17, v23
mov.16b v20, v21
adds x5, x13, x5
cinc x6, x6, hs
fmla.2d v20, v1, v10
fsub.2d v23, v22, v20
fmla.2d v23, v1, v10
mul x13, x0, x7
add.2d v0, v0, v20
add.2d v19, v19, v23
mov.16b v20, v21
umulh x0, x0, x7
fmla.2d v20, v1, v7
fsub.2d v23, v22, v20
adds x12, x13, x12
cinc x0, x0, hs
fmla.2d v23, v1, v7
add.2d v1, v18, v20
add.2d v0, v0, v23
mul x13, x1, x7
mov.16b v18, v21
fmla.2d v18, v2, v4
fsub.2d v20, v22, v18
umulh x1, x1, x7
fmla.2d v20, v2, v4
add.2d v17, v17, v18
adds x0, x13, x0
cinc x1, x1, hs
add.2d v15, v15, v20
mov.16b v18, v21
fmla.2d v18, v2, v5
adds x0, x0, x4
cinc x1, x1, hs
fsub.2d v20, v22, v18
fmla.2d v20, v2, v5
add.2d v18, v19, v18
mul x4, x2, x7
add.2d v17, v17, v20
mov.16b v19, v21
fmla.2d v19, v2, v6
umulh x2, x2, x7
fsub.2d v20, v22, v19
fmla.2d v20, v2, v6
adds x1, x4, x1
cinc x2, x2, hs
add.2d v0, v0, v19
add.2d v18, v18, v20
mov.16b v19, v21
adds x1, x1, x5
cinc x2, x2, hs
fmla.2d v19, v2, v10
fsub.2d v20, v22, v19
fmla.2d v20, v2, v10
mul x4, x3, x7
add.2d v1, v1, v19
add.2d v0, v0, v20
umulh x3, x3, x7
mov.16b v19, v21
fmla.2d v19, v2, v7
fsub.2d v20, v22, v19
adds x2, x4, x2
cinc x3, x3, hs
fmla.2d v20, v2, v7
add.2d v2, v16, v19
add.2d v1, v1, v20
adds x2, x2, x6
cinc x3, x3, hs
mov.16b v16, v21
fmla.2d v16, v9, v4
mov x4, #48718
fsub.2d v19, v22, v16
fmla.2d v19, v9, v4
add.2d v16, v18, v16
movk x4, #4732, lsl 16
add.2d v17, v17, v19
mov.16b v18, v21
fmla.2d v18, v9, v5
movk x4, #45078, lsl 32
fsub.2d v19, v22, v18
fmla.2d v19, v9, v5
add.2d v0, v0, v18
movk x4, #39852, lsl 48
add.2d v16, v16, v19
mov.16b v18, v21
mov x5, #16676
fmla.2d v18, v9, v6
fsub.2d v19, v22, v18
fmla.2d v19, v9, v6
movk x5, #12692, lsl 16
add.2d v1, v1, v18
add.2d v0, v0, v19
mov.16b v18, v21
movk x5, #20986, lsl 32
fmla.2d v18, v9, v10
fsub.2d v19, v22, v18
movk x5, #2848, lsl 48
fmla.2d v19, v9, v10
add.2d v2, v2, v18
add.2d v1, v1, v19
mov x6, #51052
mov.16b v18, v21
fmla.2d v18, v9, v7
fsub.2d v19, v22, v18
movk x6, #24721, lsl 16
fmla.2d v19, v9, v7
add.2d v9, v14, v18
movk x6, #61092, lsl 32
add.2d v2, v2, v19
mov.16b v14, v21
fmla.2d v14, v3, v4
movk x6, #45156, lsl 48
fsub.2d v18, v22, v14
fmla.2d v18, v3, v4
add.2d v0, v0, v14
mov x7, #3197
add.2d v4, v16, v18
mov.16b v14, v21
fmla.2d v14, v3, v5
movk x7, #18936, lsl 16
fsub.2d v16, v22, v14
fmla.2d v16, v3, v5
movk x7, #10922, lsl 32
add.2d v1, v1, v14
add.2d v0, v0, v16
mov.16b v5, v21
movk x7, #11014, lsl 48
fmla.2d v5, v3, v6
fsub.2d v14, v22, v5
fmla.2d v14, v3, v6
mul x13, x4, x9
add.2d v2, v2, v5
add.2d v1, v1, v14
umulh x4, x4, x9
mov.16b v5, v21
fmla.2d v5, v3, v10
fsub.2d v6, v22, v5
adds x12, x13, x12
cinc x4, x4, hs
fmla.2d v6, v3, v10
add.2d v5, v9, v5
add.2d v2, v2, v6
mul x13, x5, x9
mov.16b v6, v21
fmla.2d v6, v3, v7
umulh x5, x5, x9
fsub.2d v9, v22, v6
fmla.2d v9, v3, v7
add.2d v3, v12, v6
adds x4, x13, x4
cinc x5, x5, hs
add.2d v5, v5, v9
usra.2d v13, v11, #52
usra.2d v15, v13, #52
adds x0, x4, x0
cinc x4, x5, hs
usra.2d v17, v15, #52
usra.2d v4, v17, #52
and.16b v6, v11, v8
mul x5, x6, x9
and.16b v7, v13, v8
and.16b v9, v15, v8
umulh x6, x6, x9
and.16b v8, v17, v8
ucvtf.2d v6, v6
mov x13, #37864
adds x4, x5, x4
cinc x5, x6, hs
movk x13, #1815, lsl 16
movk x13, #28960, lsl 32
movk x13, #17153, lsl 48
adds x1, x4, x1
cinc x4, x5, hs
dup.2d v10, x13
mov.16b v11, v21
mul x5, x7, x9
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
umulh x6, x7, x9
add.2d v0, v0, v11
add.2d v4, v4, v12
mov x7, #46128
adds x4, x5, x4
cinc x5, x6, hs
movk x7, #29964, lsl 16
movk x7, #7587, lsl 32
adds x2, x4, x2
cinc x4, x5, hs
movk x7, #17161, lsl 48
dup.2d v10, x7
mov.16b v11, v21
add x3, x3, x4
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
mov x4, #56431
add.2d v1, v1, v11
add.2d v0, v0, v12
mov x5, #52826
movk x4, #30457, lsl 16
movk x5, #57790, lsl 16
movk x5, #55431, lsl 32
movk x4, #30012, lsl 32
movk x5, #17196, lsl 48
dup.2d v10, x5
mov.16b v11, v21
movk x4, #6382, lsl 48
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
mov x5, #59151
add.2d v2, v2, v11
add.2d v1, v1, v12
movk x5, #41769, lsl 16
mov x6, #31276
movk x6, #21262, lsl 16
movk x6, #2304, lsl 32
movk x5, #32276, lsl 32
movk x6, #17182, lsl 48
dup.2d v10, x6
mov.16b v11, v21
movk x5, #21677, lsl 48
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
mov x6, #34015
fmla.2d v12, v6, v10
add.2d v5, v5, v11
add.2d v2, v2, v12
movk x6, #20342, lsl 16
mov x7, #28672
movk x7, #24515, lsl 16
movk x7, #54929, lsl 32
movk x6, #13935, lsl 32
movk x7, #17064, lsl 48
dup.2d v10, x7
mov.16b v11, v21
movk x6, #11030, lsl 48
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
mov x7, #13689
fmla.2d v12, v6, v10
add.2d v3, v3, v11
add.2d v5, v5, v12
movk x7, #8159, lsl 16
ucvtf.2d v6, v7
mov x9, #44768
movk x9, #51919, lsl 16
movk x7, #215, lsl 32
movk x9, #6346, lsl 32
movk x9, #17133, lsl 48
movk x7, #4913, lsl 48
dup.2d v7, x9
mov.16b v10, v21
fmla.2d v10, v6, v7
mul x9, x4, x10
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
add.2d v0, v0, v10
umulh x4, x4, x10
add.2d v4, v4, v11
mov x13, #47492
adds x9, x9, x12
cinc x4, x4, hs
movk x13, #23630, lsl 16
movk x13, #49985, lsl 32
movk x13, #17168, lsl 48
mul x12, x5, x10
dup.2d v7, x13
mov.16b v10, v21
fmla.2d v10, v6, v7
umulh x5, x5, x10
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
add.2d v1, v1, v10
adds x4, x12, x4
cinc x5, x5, hs
add.2d v0, v0, v11
mov x12, #57936
adds x0, x4, x0
cinc x4, x5, hs
movk x12, #54828, lsl 16
movk x12, #18292, lsl 32
movk x12, #17197, lsl 48
mul x5, x6, x10
dup.2d v7, x12
mov.16b v10, v21
fmla.2d v10, v6, v7
umulh x6, x6, x10
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
adds x4, x5, x4
cinc x5, x6, hs
add.2d v2, v2, v10
add.2d v1, v1, v11
mov x6, #17708
adds x1, x4, x1
cinc x4, x5, hs
movk x6, #43915, lsl 16
movk x6, #64348, lsl 32
movk x6, #17188, lsl 48
mul x5, x7, x10
dup.2d v7, x6
mov.16b v10, v21
umulh x6, x7, x10
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
adds x4, x5, x4
cinc x5, x6, hs
add.2d v5, v5, v10
add.2d v2, v2, v11
mov x6, #29184
adds x2, x4, x2
cinc x4, x5, hs
movk x6, #20789, lsl 16
movk x6, #19197, lsl 32
movk x6, #17083, lsl 48
add x3, x3, x4
dup.2d v7, x6
mov.16b v10, v21
mov x4, #61005
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
fmla.2d v11, v6, v7
movk x4, #58262, lsl 16
add.2d v3, v3, v10
add.2d v5, v5, v11
ucvtf.2d v6, v9
movk x4, #32851, lsl 32
mov x5, #58856
movk x5, #14953, lsl 16
movk x4, #11582, lsl 48
movk x5, #15155, lsl 32
movk x5, #17181, lsl 48
dup.2d v7, x5
mov x5, #37581
mov.16b v9, v21
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
movk x5, #43836, lsl 16
fmla.2d v10, v6, v7
add.2d v0, v0, v9
movk x5, #36286, lsl 32
add.2d v4, v4, v10
mov x6, #35392
movk x6, #12477, lsl 16
movk x5, #51783, lsl 48
movk x6, #56780, lsl 32
movk x6, #17142, lsl 48
dup.2d v7, x6
mov x6, #10899
mov.16b v9, v21
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
movk x6, #30709, lsl 16
fmla.2d v10, v6, v7
add.2d v1, v1, v9
movk x6, #61551, lsl 32
add.2d v0, v0, v10
mov x7, #9848
movk x7, #54501, lsl 16
movk x6, #45784, lsl 48
movk x7, #31540, lsl 32
movk x7, #17170, lsl 48
dup.2d v7, x7
mov x7, #36612
mov.16b v9, v21
fmla.2d v9, v6, v7
movk x7, #63402, lsl 16
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v2, v2, v9
movk x7, #47623, lsl 32
add.2d v1, v1, v10
mov x10, #9584
movk x10, #63883, lsl 16
movk x7, #9430, lsl 48
movk x10, #18253, lsl 32
movk x10, #17190, lsl 48
mul x12, x4, x11
dup.2d v7, x10
mov.16b v9, v21
fmla.2d v9, v6, v7
umulh x4, x4, x11
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v5, v5, v9
adds x9, x12, x9
cinc x4, x4, hs
add.2d v2, v2, v10
mov x10, #51712
movk x10, #16093, lsl 16
mul x12, x5, x11
movk x10, #30633, lsl 32
movk x10, #17068, lsl 48
umulh x5, x5, x11
dup.2d v7, x10
mov.16b v9, v21
fmla.2d v9, v6, v7
adds x4, x12, x4
cinc x5, x5, hs
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
add.2d v3, v3, v9
adds x0, x4, x0
cinc x4, x5, hs
add.2d v5, v5, v10
ucvtf.2d v6, v8
mul x5, x6, x11
mov x10, #34724
movk x10, #40393, lsl 16
movk x10, #23752, lsl 32
umulh x6, x6, x11
movk x10, #17184, lsl 48
dup.2d v7, x10
mov.16b v8, v21
adds x4, x5, x4
cinc x5, x6, hs
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
adds x1, x4, x1
cinc x4, x5, hs
fmla.2d v9, v6, v7
add.2d v0, v0, v8
add.2d v4, v4, v9
mul x5, x7, x11
mov x6, #25532
movk x6, #31025, lsl 16
movk x6, #10002, lsl 32
umulh x7, x7, x11
movk x6, #17199, lsl 48
dup.2d v7, x6
mov.16b v8, v21
adds x4, x5, x4
cinc x5, x7, hs
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
adds x2, x4, x2
cinc x4, x5, hs
fmla.2d v9, v6, v7
add.2d v1, v1, v8
add.2d v0, v0, v9
add x3, x3, x4
mov x4, #18830
movk x4, #2465, lsl 16
movk x4, #36348, lsl 32
mov x5, #65535
movk x4, #17194, lsl 48
dup.2d v7, x4
movk x5, #61439, lsl 16
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
movk x5, #62867, lsl 32
fmla.2d v9, v6, v7
add.2d v2, v2, v8
add.2d v1, v1, v9
movk x5, #49889, lsl 48
mov x4, #21566
movk x4, #43708, lsl 16
mul x5, x5, x9
movk x4, #57685, lsl 32
movk x4, #17185, lsl 48
dup.2d v7, x4
mov x4, #1
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
movk x4, #61440, lsl 16
fmla.2d v9, v6, v7
add.2d v5, v5, v8
add.2d v2, v2, v9
movk x4, #62867, lsl 32
mov x6, #3072
movk x6, #8058, lsl 16
movk x4, #17377, lsl 48
movk x6, #46097, lsl 32
movk x6, #17047, lsl 48
dup.2d v7, x6
mov x6, #28817
mov.16b v8, v21
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
movk x6, #31161, lsl 16
fmla.2d v9, v6, v7
add.2d v3, v3, v8
movk x6, #59464, lsl 32
add.2d v5, v5, v9
mov x7, #65535
movk x7, #61439, lsl 16
movk x6, #10291, lsl 48
movk x7, #62867, lsl 32
movk x7, #1, lsl 48
umov x10, v4.d[0]
mov x11, #22621
umov x12, v4.d[1]
mul x10, x10, x7
movk x11, #33153, lsl 16
mul x7, x12, x7
and x10, x10, x8
and x7, x7, x8
movk x11, #17846, lsl 32
ins v6.d[0], x10
ins v6.d[1], x7
ucvtf.2d v6, v6
mov x7, #16
movk x11, #47184, lsl 48
movk x7, #22847, lsl 32
movk x7, #17151, lsl 48
dup.2d v7, x7
mov x7, #41001
mov.16b v8, v21
fmla.2d v8, v6, v7
movk x7, #57649, lsl 16
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v0, v0, v8
movk x7, #20082, lsl 32
add.2d v4, v4, v9
mov x8, #20728
movk x8, #23588, lsl 16
movk x7, #12388, lsl 48
movk x8, #7790, lsl 32
movk x8, #17170, lsl 48
mul x10, x4, x5
dup.2d v7, x8
mov.16b v8, v21
fmla.2d v8, v6, v7
umulh x4, x4, x5
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v1, v1, v8
cmn x10, x9
cinc x4, x4, hs
add.2d v0, v0, v9
mov x8, #16000
mul x9, x6, x5
movk x8, #53891, lsl 16
movk x8, #5509, lsl 32
movk x8, #17144, lsl 48
umulh x6, x6, x5
dup.2d v7, x8
mov.16b v8, v21
fmla.2d v8, v6, v7
adds x4, x9, x4
cinc x6, x6, hs
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
add.2d v2, v2, v8
adds x0, x4, x0
cinc x4, x6, hs
add.2d v1, v1, v9
mov x6, #46800
mul x8, x11, x5
movk x6, #2568, lsl 16
movk x6, #1335, lsl 32
movk x6, #17188, lsl 48
umulh x9, x11, x5
dup.2d v7, x6
mov.16b v8, v21
fmla.2d v8, v6, v7
adds x4, x8, x4
cinc x6, x9, hs
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
adds x1, x4, x1
cinc x4, x6, hs
add.2d v5, v5, v8
add.2d v2, v2, v9
mov x6, #39040
mul x8, x7, x5
movk x6, #14704, lsl 16
movk x6, #12839, lsl 32
movk x6, #17096, lsl 48
umulh x5, x7, x5
dup.2d v7, x6
mov.16b v8, v21
adds x4, x8, x4
cinc x5, x5, hs
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
adds x2, x4, x2
cinc x4, x5, hs
add.2d v3, v3, v8
add.2d v5, v5, v9
mov x5, #140737488355328
add x3, x3, x4
dup.2d v6, x5
and.16b v6, v3, v6
cmeq.2d v6, v6, #0
mov x4, #2
mov x5, #2
movk x5, #57344, lsl 16
movk x4, #57344, lsl 16
movk x5, #60199, lsl 32
movk x5, #3, lsl 48
dup.2d v7, x5
movk x4, #60199, lsl 32
bic.16b v7, v7, v6
mov x5, #10364
movk x5, #11794, lsl 16
movk x4, #34755, lsl 48
movk x5, #3895, lsl 32
movk x5, #9, lsl 48
mov x6, #57634
dup.2d v8, x5
bic.16b v8, v8, v6
mov x5, #26576
movk x6, #62322, lsl 16
movk x5, #47696, lsl 16
movk x5, #688, lsl 32
movk x5, #3, lsl 48
movk x6, #53392, lsl 32
dup.2d v9, x5
bic.16b v9, v9, v6
movk x6, #20583, lsl 48
mov x5, #46800
movk x5, #2568, lsl 16
movk x5, #1335, lsl 32
mov x7, #45242
movk x5, #4, lsl 48
dup.2d v10, x5
bic.16b v10, v10, v6
movk x7, #770, lsl 16
mov x5, #49763
movk x5, #40165, lsl 16
movk x5, #24776, lsl 32
movk x7, #35693, lsl 32
dup.2d v11, x5
bic.16b v6, v11, v6
movk x7, #28832, lsl 48
sub.2d v0, v0, v7
ssra.2d v0, v4, #52
sub.2d v4, v1, v8
mov x5, #16467
ssra.2d v4, v0, #52
sub.2d v7, v2, v9
ssra.2d v7, v4, #52
movk x5, #49763, lsl 16
sub.2d v5, v5, v10
ssra.2d v5, v7, #52
movk x5, #40165, lsl 32
sub.2d v6, v3, v6
ssra.2d v6, v5, #52
ushr.2d v1, v4, #12
movk x5, #24776, lsl 48
ushr.2d v2, v7, #24
ushr.2d v3, v5, #36
sli.2d v0, v4, #52
subs x4, x0, x4
sbcs x6, x1, x6
sbcs x7, x2, x7
sbcs x5, x3, x5
sli.2d v1, v7, #40
sli.2d v2, v5, #28
sli.2d v3, v6, #16
tst x3, #9223372036854775808
csel x0, x4, x0, mi
csel x1, x6, x1, mi
csel x2, x7, x2, mi
csel x3, x5, x3, mi
ret
xrvdg/modmulzoo | 4,538 | crates/modmul-asm/asm/vmultadd_noinit_simd.s
//in("v0") t[0], in("v1") t[1], in("v2") t[2], in("v3") t[3], in("v4") t[4], in("v5") t[5], in("v6") t[6], in("v7") t[7], in("v8") t[8], in("v9") t[9],
//in("v10") a[0], in("v11") a[1], in("v12") a[2], in("v13") a[3], in("v14") a[4],
//in("v15") b[0], in("v16") b[1], in("v17") b[2], in("v18") b[3], in("v19") b[4],
//lateout("v0") out[0], lateout("v1") out[1], lateout("v2") out[2], lateout("v3") out[3], lateout("v4") out[4], lateout("v5") out[5], lateout("v6") out[6], lateout("v7") out[7], lateout("v8") out[8], lateout("v9") out[9],
//lateout("x0") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, lateout("v20") _, lateout("v21") _, lateout("v22") _, lateout("v23") _,
//lateout("lr") _
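// Added sketch (inferred): multiply-accumulate of two 5-limb inputs (a in v10..v14,
// b in v15..v19) into the 10-column accumulator t in v0..v9, using the same hi/lo FMA
// trick (C1 = 2^104 in v20, C2 = 2^104 + 2^52 in v21). Consistent with the "noinit" name,
// the incoming t is added to rather than cleared, and no carry propagation happens here.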
.global _vmultadd_noinit_simd
.align 4
.text
_vmultadd_noinit_simd:
mov x0, #5075556780046548992
dup.2d v20, x0
mov x0, #1
movk x0, #18032, lsl 48
dup.2d v21, x0
ucvtf.2d v10, v10
ucvtf.2d v11, v11
ucvtf.2d v12, v12
ucvtf.2d v13, v13
ucvtf.2d v14, v14
ucvtf.2d v15, v15
ucvtf.2d v16, v16
ucvtf.2d v17, v17
ucvtf.2d v18, v18
ucvtf.2d v19, v19
mov.16b v22, v20
fmla.2d v22, v10, v15
fsub.2d v23, v21, v22
fmla.2d v23, v10, v15
add.2d v1, v1, v22
add.2d v0, v0, v23
mov.16b v22, v20
fmla.2d v22, v10, v16
fsub.2d v23, v21, v22
fmla.2d v23, v10, v16
add.2d v2, v2, v22
add.2d v1, v1, v23
mov.16b v22, v20
fmla.2d v22, v10, v17
fsub.2d v23, v21, v22
fmla.2d v23, v10, v17
add.2d v3, v3, v22
add.2d v2, v2, v23
mov.16b v22, v20
fmla.2d v22, v10, v18
fsub.2d v23, v21, v22
fmla.2d v23, v10, v18
add.2d v4, v4, v22
add.2d v3, v3, v23
mov.16b v22, v20
fmla.2d v22, v10, v19
fsub.2d v23, v21, v22
fmla.2d v23, v10, v19
add.2d v5, v5, v22
add.2d v4, v4, v23
mov.16b v10, v20
fmla.2d v10, v11, v15
fsub.2d v22, v21, v10
fmla.2d v22, v11, v15
add.2d v2, v2, v10
add.2d v1, v1, v22
mov.16b v10, v20
fmla.2d v10, v11, v16
fsub.2d v22, v21, v10
fmla.2d v22, v11, v16
add.2d v3, v3, v10
add.2d v2, v2, v22
mov.16b v10, v20
fmla.2d v10, v11, v17
fsub.2d v22, v21, v10
fmla.2d v22, v11, v17
add.2d v4, v4, v10
add.2d v3, v3, v22
mov.16b v10, v20
fmla.2d v10, v11, v18
fsub.2d v22, v21, v10
fmla.2d v22, v11, v18
add.2d v5, v5, v10
add.2d v4, v4, v22
mov.16b v10, v20
fmla.2d v10, v11, v19
fsub.2d v22, v21, v10
fmla.2d v22, v11, v19
add.2d v6, v6, v10
add.2d v5, v5, v22
mov.16b v10, v20
fmla.2d v10, v12, v15
fsub.2d v11, v21, v10
fmla.2d v11, v12, v15
add.2d v3, v3, v10
add.2d v2, v2, v11
mov.16b v10, v20
fmla.2d v10, v12, v16
fsub.2d v11, v21, v10
fmla.2d v11, v12, v16
add.2d v4, v4, v10
add.2d v3, v3, v11
mov.16b v10, v20
fmla.2d v10, v12, v17
fsub.2d v11, v21, v10
fmla.2d v11, v12, v17
add.2d v5, v5, v10
add.2d v4, v4, v11
mov.16b v10, v20
fmla.2d v10, v12, v18
fsub.2d v11, v21, v10
fmla.2d v11, v12, v18
add.2d v6, v6, v10
add.2d v5, v5, v11
mov.16b v10, v20
fmla.2d v10, v12, v19
fsub.2d v11, v21, v10
fmla.2d v11, v12, v19
add.2d v7, v7, v10
add.2d v6, v6, v11
mov.16b v10, v20
fmla.2d v10, v13, v15
fsub.2d v11, v21, v10
fmla.2d v11, v13, v15
add.2d v4, v4, v10
add.2d v3, v3, v11
mov.16b v10, v20
fmla.2d v10, v13, v16
fsub.2d v11, v21, v10
fmla.2d v11, v13, v16
add.2d v5, v5, v10
add.2d v4, v4, v11
mov.16b v10, v20
fmla.2d v10, v13, v17
fsub.2d v11, v21, v10
fmla.2d v11, v13, v17
add.2d v6, v6, v10
add.2d v5, v5, v11
mov.16b v10, v20
fmla.2d v10, v13, v18
fsub.2d v11, v21, v10
fmla.2d v11, v13, v18
add.2d v7, v7, v10
add.2d v6, v6, v11
mov.16b v10, v20
fmla.2d v10, v13, v19
fsub.2d v11, v21, v10
fmla.2d v11, v13, v19
add.2d v8, v8, v10
add.2d v7, v7, v11
mov.16b v10, v20
fmla.2d v10, v14, v15
fsub.2d v11, v21, v10
fmla.2d v11, v14, v15
add.2d v5, v5, v10
add.2d v4, v4, v11
mov.16b v10, v20
fmla.2d v10, v14, v16
fsub.2d v11, v21, v10
fmla.2d v11, v14, v16
add.2d v6, v6, v10
add.2d v5, v5, v11
mov.16b v10, v20
fmla.2d v10, v14, v17
fsub.2d v11, v21, v10
fmla.2d v11, v14, v17
add.2d v7, v7, v10
add.2d v6, v6, v11
mov.16b v10, v20
fmla.2d v10, v14, v18
fsub.2d v11, v21, v10
fmla.2d v11, v14, v18
add.2d v8, v8, v10
add.2d v7, v7, v11
mov.16b v10, v20
fmla.2d v10, v14, v19
fsub.2d v11, v21, v10
fmla.2d v11, v14, v19
add.2d v9, v9, v10
add.2d v8, v8, v11
ret
xrvdg/modmulzoo | 30,745 | crates/modmul-asm/asm/single_step_interleaved_triple_scalar.s
//in("x0") a,
//in("x1") b,
//in("x2") a1,
//in("x3") b1,
//in("x4") a2,
//in("x5") b2,
//in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3],
//in("v4") bv[0], in("v5") bv[1], in("v6") bv[2], in("v7") bv[3],
//lateout("x0") a,
//lateout("x2") a1,
//lateout("x4") a2,
//lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3],
//lateout("x1") _, lateout("x3") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("x20") _, lateout("x21") _, lateout("x22") _, lateout("x23") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, lateout("v20") _, lateout("v21") _, lateout("v22") _, lateout("v23") _, lateout("v24") _,
//lateout("lr") _
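// Added note (inferred from the name and the loads/stores): three scalar single-step
// multiplications are threaded through one SIMD multiplication. Unlike the variants above,
// the scalar operands live in memory -- ldp reads each a/b pair through the pointers in
// x0/x1, x2/x3 and x4/x5, and each scalar result is written back with stp before the next
// pair is loaded, while the SIMD stream keeps running in the vector registers throughout.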
.global _single_step_interleaved_triple_scalar
.align 4
.text
_single_step_interleaved_triple_scalar:
ldp x6, x7, [x0, #0]
mov x8, #4503599627370495
ldp x9, x10, [x0, #16]
dup.2d v8, x8
ldp x11, x12, [x1, #0]
shl.2d v9, v1, #14
ldp x1, x13, [x1, #16]
shl.2d v10, v2, #26
mul x14, x6, x11
shl.2d v11, v3, #38
umulh x15, x6, x11
ushr.2d v3, v3, #14
mul x16, x7, x11
umulh x17, x7, x11
shl.2d v12, v0, #2
adds x15, x16, x15
cinc x16, x17, hs
usra.2d v9, v0, #50
mul x17, x9, x11
usra.2d v10, v1, #38
umulh x20, x9, x11
usra.2d v11, v2, #26
adds x16, x17, x16
cinc x17, x20, hs
and.16b v0, v12, v8
mul x20, x10, x11
and.16b v1, v9, v8
umulh x11, x10, x11
and.16b v2, v10, v8
adds x17, x20, x17
cinc x11, x11, hs
mul x20, x6, x12
and.16b v9, v11, v8
umulh x21, x6, x12
shl.2d v10, v5, #14
adds x15, x20, x15
cinc x20, x21, hs
shl.2d v11, v6, #26
mul x21, x7, x12
shl.2d v12, v7, #38
umulh x22, x7, x12
ushr.2d v7, v7, #14
adds x20, x21, x20
cinc x21, x22, hs
shl.2d v13, v4, #2
adds x16, x20, x16
cinc x20, x21, hs
usra.2d v10, v4, #50
mul x21, x9, x12
umulh x22, x9, x12
usra.2d v11, v5, #38
adds x20, x21, x20
cinc x21, x22, hs
usra.2d v12, v6, #26
adds x17, x20, x17
cinc x20, x21, hs
and.16b v4, v13, v8
mul x21, x10, x12
and.16b v5, v10, v8
umulh x12, x10, x12
and.16b v6, v11, v8
adds x20, x21, x20
cinc x12, x12, hs
and.16b v10, v12, v8
adds x11, x20, x11
cinc x12, x12, hs
mov x20, #13605374474286268416
mul x21, x6, x1
umulh x22, x6, x1
dup.2d v11, x20
adds x16, x21, x16
cinc x20, x22, hs
mov x21, #6440147467139809280
mul x22, x7, x1
dup.2d v12, x21
umulh x21, x7, x1
mov x23, #3688448094816436224
adds x20, x22, x20
cinc x21, x21, hs
dup.2d v13, x23
adds x17, x20, x17
cinc x20, x21, hs
mov x21, #9209861237972664320
mul x22, x9, x1
dup.2d v14, x21
umulh x21, x9, x1
adds x20, x22, x20
cinc x21, x21, hs
mov x22, #12218265789056155648
adds x11, x20, x11
cinc x20, x21, hs
dup.2d v15, x22
mul x21, x10, x1
mov x22, #17739678932212383744
umulh x1, x10, x1
dup.2d v16, x22
adds x20, x21, x20
cinc x1, x1, hs
mov x21, #2301339409586323456
adds x12, x20, x12
cinc x1, x1, hs
dup.2d v17, x21
mul x20, x6, x13
mov x21, #7822752552742551552
umulh x6, x6, x13
adds x17, x20, x17
cinc x6, x6, hs
dup.2d v18, x21
mul x20, x7, x13
mov x21, #5071053180419178496
umulh x7, x7, x13
dup.2d v19, x21
adds x6, x20, x6
cinc x7, x7, hs
mov x20, #16352570246982270976
adds x6, x6, x11
cinc x7, x7, hs
dup.2d v20, x20
mul x11, x9, x13
mov x20, #5075556780046548992
umulh x9, x9, x13
dup.2d v21, x20
adds x7, x11, x7
cinc x9, x9, hs
adds x7, x7, x12
cinc x9, x9, hs
mov x11, #1
mul x12, x10, x13
movk x11, #18032, lsl 48
umulh x10, x10, x13
dup.2d v22, x11
adds x9, x12, x9
cinc x10, x10, hs
ucvtf.2d v0, v0
adds x1, x9, x1
cinc x9, x10, hs
ucvtf.2d v1, v1
mov x10, #48718
ucvtf.2d v2, v2
movk x10, #4732, lsl 16
ucvtf.2d v9, v9
movk x10, #45078, lsl 32
movk x10, #39852, lsl 48
ucvtf.2d v3, v3
mov x11, #16676
ucvtf.2d v4, v4
movk x11, #12692, lsl 16
ucvtf.2d v5, v5
movk x11, #20986, lsl 32
ucvtf.2d v6, v6
movk x11, #2848, lsl 48
ucvtf.2d v10, v10
mov x12, #51052
ucvtf.2d v7, v7
movk x12, #24721, lsl 16
mov.16b v23, v21
movk x12, #61092, lsl 32
movk x12, #45156, lsl 48
fmla.2d v23, v0, v4
mov x13, #3197
fsub.2d v24, v22, v23
movk x13, #18936, lsl 16
fmla.2d v24, v0, v4
movk x13, #10922, lsl 32
add.2d v13, v13, v23
movk x13, #11014, lsl 48
add.2d v11, v11, v24
mul x20, x10, x14
mov.16b v23, v21
umulh x10, x10, x14
fmla.2d v23, v0, v5
adds x17, x20, x17
cinc x10, x10, hs
mul x20, x11, x14
fsub.2d v24, v22, v23
umulh x11, x11, x14
fmla.2d v24, v0, v5
adds x10, x20, x10
cinc x11, x11, hs
add.2d v15, v15, v23
adds x6, x10, x6
cinc x10, x11, hs
add.2d v13, v13, v24
mul x11, x12, x14
mov.16b v23, v21
umulh x12, x12, x14
fmla.2d v23, v0, v6
adds x10, x11, x10
cinc x11, x12, hs
fsub.2d v24, v22, v23
adds x7, x10, x7
cinc x10, x11, hs
mul x11, x13, x14
fmla.2d v24, v0, v6
umulh x12, x13, x14
add.2d v17, v17, v23
adds x10, x11, x10
cinc x11, x12, hs
add.2d v15, v15, v24
adds x1, x10, x1
cinc x10, x11, hs
mov.16b v23, v21
add x9, x9, x10
fmla.2d v23, v0, v10
mov x10, #56431
fsub.2d v24, v22, v23
movk x10, #30457, lsl 16
fmla.2d v24, v0, v10
movk x10, #30012, lsl 32
movk x10, #6382, lsl 48
add.2d v19, v19, v23
mov x11, #59151
add.2d v17, v17, v24
movk x11, #41769, lsl 16
mov.16b v23, v21
movk x11, #32276, lsl 32
fmla.2d v23, v0, v7
movk x11, #21677, lsl 48
fsub.2d v24, v22, v23
mov x12, #34015
fmla.2d v24, v0, v7
movk x12, #20342, lsl 16
add.2d v0, v20, v23
movk x12, #13935, lsl 32
movk x12, #11030, lsl 48
add.2d v19, v19, v24
mov x13, #13689
mov.16b v20, v21
movk x13, #8159, lsl 16
fmla.2d v20, v1, v4
movk x13, #215, lsl 32
fsub.2d v23, v22, v20
movk x13, #4913, lsl 48
fmla.2d v23, v1, v4
mul x14, x10, x15
add.2d v15, v15, v20
umulh x10, x10, x15
add.2d v13, v13, v23
adds x14, x14, x17
cinc x10, x10, hs
mul x17, x11, x15
mov.16b v20, v21
umulh x11, x11, x15
fmla.2d v20, v1, v5
adds x10, x17, x10
cinc x11, x11, hs
fsub.2d v23, v22, v20
adds x6, x10, x6
cinc x10, x11, hs
fmla.2d v23, v1, v5
mul x11, x12, x15
add.2d v17, v17, v20
umulh x12, x12, x15
add.2d v15, v15, v23
adds x10, x11, x10
cinc x11, x12, hs
mov.16b v20, v21
adds x7, x10, x7
cinc x10, x11, hs
mul x11, x13, x15
fmla.2d v20, v1, v6
umulh x12, x13, x15
fsub.2d v23, v22, v20
adds x10, x11, x10
cinc x11, x12, hs
fmla.2d v23, v1, v6
adds x1, x10, x1
cinc x10, x11, hs
add.2d v19, v19, v20
add x9, x9, x10
add.2d v17, v17, v23
mov x10, #61005
mov.16b v20, v21
movk x10, #58262, lsl 16
fmla.2d v20, v1, v10
movk x10, #32851, lsl 32
movk x10, #11582, lsl 48
fsub.2d v23, v22, v20
mov x11, #37581
fmla.2d v23, v1, v10
movk x11, #43836, lsl 16
add.2d v0, v0, v20
movk x11, #36286, lsl 32
add.2d v19, v19, v23
movk x11, #51783, lsl 48
mov.16b v20, v21
mov x12, #10899
fmla.2d v20, v1, v7
movk x12, #30709, lsl 16
fsub.2d v23, v22, v20
movk x12, #61551, lsl 32
movk x12, #45784, lsl 48
fmla.2d v23, v1, v7
mov x13, #36612
add.2d v1, v18, v20
movk x13, #63402, lsl 16
add.2d v0, v0, v23
movk x13, #47623, lsl 32
mov.16b v18, v21
movk x13, #9430, lsl 48
fmla.2d v18, v2, v4
mul x15, x10, x16
fsub.2d v20, v22, v18
umulh x10, x10, x16
fmla.2d v20, v2, v4
adds x14, x15, x14
cinc x10, x10, hs
mul x15, x11, x16
add.2d v17, v17, v18
umulh x11, x11, x16
add.2d v15, v15, v20
adds x10, x15, x10
cinc x11, x11, hs
mov.16b v18, v21
adds x6, x10, x6
cinc x10, x11, hs
fmla.2d v18, v2, v5
mul x11, x12, x16
fsub.2d v20, v22, v18
umulh x12, x12, x16
fmla.2d v20, v2, v5
adds x10, x11, x10
cinc x11, x12, hs
add.2d v18, v19, v18
adds x7, x10, x7
cinc x10, x11, hs
mul x11, x13, x16
add.2d v17, v17, v20
umulh x12, x13, x16
mov.16b v19, v21
adds x10, x11, x10
cinc x11, x12, hs
fmla.2d v19, v2, v6
adds x1, x10, x1
cinc x10, x11, hs
fsub.2d v20, v22, v19
add x9, x9, x10
fmla.2d v20, v2, v6
mov x10, #65535
add.2d v0, v0, v19
movk x10, #61439, lsl 16
add.2d v18, v18, v20
movk x10, #62867, lsl 32
movk x10, #49889, lsl 48
mov.16b v19, v21
mul x10, x10, x14
fmla.2d v19, v2, v10
mov x11, #1
fsub.2d v20, v22, v19
movk x11, #61440, lsl 16
fmla.2d v20, v2, v10
movk x11, #62867, lsl 32
add.2d v1, v1, v19
movk x11, #17377, lsl 48
add.2d v0, v0, v20
mov x12, #28817
mov.16b v19, v21
movk x12, #31161, lsl 16
movk x12, #59464, lsl 32
fmla.2d v19, v2, v7
movk x12, #10291, lsl 48
fsub.2d v20, v22, v19
mov x13, #22621
fmla.2d v20, v2, v7
movk x13, #33153, lsl 16
add.2d v2, v16, v19
movk x13, #17846, lsl 32
add.2d v1, v1, v20
movk x13, #47184, lsl 48
mov.16b v16, v21
mov x15, #41001
fmla.2d v16, v9, v4
movk x15, #57649, lsl 16
movk x15, #20082, lsl 32
fsub.2d v19, v22, v16
movk x15, #12388, lsl 48
fmla.2d v19, v9, v4
mul x16, x11, x10
add.2d v16, v18, v16
umulh x11, x11, x10
add.2d v17, v17, v19
cmn x16, x14
cinc x11, x11, hs
mov.16b v18, v21
mul x14, x12, x10
fmla.2d v18, v9, v5
umulh x12, x12, x10
fsub.2d v19, v22, v18
adds x11, x14, x11
cinc x12, x12, hs
adds x6, x11, x6
cinc x11, x12, hs
fmla.2d v19, v9, v5
mul x12, x13, x10
add.2d v0, v0, v18
umulh x13, x13, x10
add.2d v16, v16, v19
adds x11, x12, x11
cinc x12, x13, hs
mov.16b v18, v21
adds x7, x11, x7
cinc x11, x12, hs
fmla.2d v18, v9, v6
mul x12, x15, x10
fsub.2d v19, v22, v18
umulh x10, x15, x10
fmla.2d v19, v9, v6
adds x11, x12, x11
cinc x10, x10, hs
adds x1, x11, x1
cinc x10, x10, hs
add.2d v1, v1, v18
add x9, x9, x10
add.2d v0, v0, v19
mov x10, #2
mov.16b v18, v21
movk x10, #57344, lsl 16
fmla.2d v18, v9, v10
movk x10, #60199, lsl 32
fsub.2d v19, v22, v18
movk x10, #34755, lsl 48
fmla.2d v19, v9, v10
mov x11, #57634
add.2d v2, v2, v18
movk x11, #62322, lsl 16
movk x11, #53392, lsl 32
add.2d v1, v1, v19
movk x11, #20583, lsl 48
mov.16b v18, v21
mov x12, #45242
fmla.2d v18, v9, v7
movk x12, #770, lsl 16
fsub.2d v19, v22, v18
movk x12, #35693, lsl 32
fmla.2d v19, v9, v7
movk x12, #28832, lsl 48
add.2d v9, v14, v18
mov x13, #16467
add.2d v2, v2, v19
movk x13, #49763, lsl 16
movk x13, #40165, lsl 32
mov.16b v14, v21
movk x13, #24776, lsl 48
fmla.2d v14, v3, v4
subs x10, x6, x10
sbcs x11, x7, x11
sbcs x12, x1, x12
sbcs x13, x9, x13
fsub.2d v18, v22, v14
tst x9, #9223372036854775808
csel x6, x10, x6, mi
csel x7, x11, x7, mi
csel x1, x12, x1, mi
csel x9, x13, x9, mi
fmla.2d v18, v3, v4
stp x6, x7, [x0, #0]
add.2d v0, v0, v14
stp x1, x9, [x0, #16]
add.2d v4, v16, v18
ldp x1, x6, [x2, #0]
mov.16b v14, v21
ldp x7, x9, [x2, #16]
ldp x10, x11, [x3, #0]
fmla.2d v14, v3, v5
ldp x3, x12, [x3, #16]
fsub.2d v16, v22, v14
mul x13, x1, x10
fmla.2d v16, v3, v5
umulh x14, x1, x10
add.2d v1, v1, v14
mul x15, x6, x10
add.2d v0, v0, v16
umulh x16, x6, x10
mov.16b v5, v21
adds x14, x15, x14
cinc x15, x16, hs
fmla.2d v5, v3, v6
mul x16, x7, x10
umulh x17, x7, x10
fsub.2d v14, v22, v5
adds x15, x16, x15
cinc x16, x17, hs
fmla.2d v14, v3, v6
mul x17, x9, x10
add.2d v2, v2, v5
umulh x10, x9, x10
add.2d v1, v1, v14
adds x16, x17, x16
cinc x10, x10, hs
mov.16b v5, v21
mul x17, x1, x11
fmla.2d v5, v3, v10
umulh x20, x1, x11
fsub.2d v6, v22, v5
adds x14, x17, x14
cinc x17, x20, hs
mul x20, x6, x11
fmla.2d v6, v3, v10
umulh x21, x6, x11
add.2d v5, v9, v5
adds x17, x20, x17
cinc x20, x21, hs
add.2d v2, v2, v6
adds x15, x17, x15
cinc x17, x20, hs
mov.16b v6, v21
mul x20, x7, x11
fmla.2d v6, v3, v7
umulh x21, x7, x11
fsub.2d v9, v22, v6
adds x17, x20, x17
cinc x20, x21, hs
fmla.2d v9, v3, v7
adds x16, x17, x16
cinc x17, x20, hs
mul x20, x9, x11
add.2d v3, v12, v6
umulh x11, x9, x11
add.2d v5, v5, v9
adds x17, x20, x17
cinc x11, x11, hs
usra.2d v13, v11, #52
adds x10, x17, x10
cinc x11, x11, hs
usra.2d v15, v13, #52
mul x17, x1, x3
usra.2d v17, v15, #52
umulh x20, x1, x3
usra.2d v4, v17, #52
adds x15, x17, x15
cinc x17, x20, hs
and.16b v6, v11, v8
mul x20, x6, x3
umulh x21, x6, x3
and.16b v7, v13, v8
adds x17, x20, x17
cinc x20, x21, hs
and.16b v9, v15, v8
adds x16, x17, x16
cinc x17, x20, hs
and.16b v8, v17, v8
mul x20, x7, x3
ucvtf.2d v6, v6
umulh x21, x7, x3
mov x22, #37864
adds x17, x20, x17
cinc x20, x21, hs
movk x22, #1815, lsl 16
adds x10, x17, x10
cinc x17, x20, hs
movk x22, #28960, lsl 32
mul x20, x9, x3
umulh x3, x9, x3
movk x22, #17153, lsl 48
adds x17, x20, x17
cinc x3, x3, hs
dup.2d v10, x22
adds x11, x17, x11
cinc x3, x3, hs
mov.16b v11, v21
mul x17, x1, x12
fmla.2d v11, v6, v10
umulh x1, x1, x12
fsub.2d v12, v22, v11
adds x16, x17, x16
cinc x1, x1, hs
fmla.2d v12, v6, v10
mul x17, x6, x12
add.2d v0, v0, v11
umulh x6, x6, x12
adds x1, x17, x1
cinc x6, x6, hs
add.2d v4, v4, v12
adds x1, x1, x10
cinc x6, x6, hs
mov x10, #46128
mul x17, x7, x12
movk x10, #29964, lsl 16
umulh x7, x7, x12
movk x10, #7587, lsl 32
adds x6, x17, x6
cinc x7, x7, hs
movk x10, #17161, lsl 48
adds x6, x6, x11
cinc x7, x7, hs
dup.2d v10, x10
mul x10, x9, x12
mov.16b v11, v21
umulh x9, x9, x12
adds x7, x10, x7
cinc x9, x9, hs
fmla.2d v11, v6, v10
adds x3, x7, x3
cinc x7, x9, hs
fsub.2d v12, v22, v11
mov x9, #48718
fmla.2d v12, v6, v10
movk x9, #4732, lsl 16
add.2d v1, v1, v11
movk x9, #45078, lsl 32
add.2d v0, v0, v12
movk x9, #39852, lsl 48
mov x10, #52826
mov x11, #16676
movk x10, #57790, lsl 16
movk x11, #12692, lsl 16
movk x11, #20986, lsl 32
movk x10, #55431, lsl 32
movk x11, #2848, lsl 48
movk x10, #17196, lsl 48
mov x12, #51052
dup.2d v10, x10
movk x12, #24721, lsl 16
mov.16b v11, v21
movk x12, #61092, lsl 32
fmla.2d v11, v6, v10
movk x12, #45156, lsl 48
fsub.2d v12, v22, v11
mov x10, #3197
fmla.2d v12, v6, v10
movk x10, #18936, lsl 16
movk x10, #10922, lsl 32
add.2d v2, v2, v11
movk x10, #11014, lsl 48
add.2d v1, v1, v12
mul x17, x9, x13
mov x20, #31276
umulh x9, x9, x13
movk x20, #21262, lsl 16
adds x16, x17, x16
cinc x9, x9, hs
movk x20, #2304, lsl 32
mul x17, x11, x13
movk x20, #17182, lsl 48
umulh x11, x11, x13
dup.2d v10, x20
adds x9, x17, x9
cinc x11, x11, hs
adds x1, x9, x1
cinc x9, x11, hs
mov.16b v11, v21
mul x11, x12, x13
fmla.2d v11, v6, v10
umulh x12, x12, x13
fsub.2d v12, v22, v11
adds x9, x11, x9
cinc x11, x12, hs
fmla.2d v12, v6, v10
adds x6, x9, x6
cinc x9, x11, hs
add.2d v5, v5, v11
mul x11, x10, x13
add.2d v2, v2, v12
umulh x10, x10, x13
mov x12, #28672
adds x9, x11, x9
cinc x10, x10, hs
adds x3, x9, x3
cinc x9, x10, hs
movk x12, #24515, lsl 16
add x7, x7, x9
movk x12, #54929, lsl 32
mov x9, #56431
movk x12, #17064, lsl 48
movk x9, #30457, lsl 16
dup.2d v10, x12
movk x9, #30012, lsl 32
mov.16b v11, v21
movk x9, #6382, lsl 48
fmla.2d v11, v6, v10
mov x10, #59151
fsub.2d v12, v22, v11
movk x10, #41769, lsl 16
movk x10, #32276, lsl 32
fmla.2d v12, v6, v10
movk x10, #21677, lsl 48
add.2d v3, v3, v11
mov x11, #34015
add.2d v5, v5, v12
movk x11, #20342, lsl 16
ucvtf.2d v6, v7
movk x11, #13935, lsl 32
mov x12, #44768
movk x11, #11030, lsl 48
movk x12, #51919, lsl 16
mov x13, #13689
movk x12, #6346, lsl 32
movk x13, #8159, lsl 16
movk x13, #215, lsl 32
movk x12, #17133, lsl 48
movk x13, #4913, lsl 48
dup.2d v7, x12
mul x12, x9, x14
mov.16b v10, v21
umulh x9, x9, x14
fmla.2d v10, v6, v7
adds x12, x12, x16
cinc x9, x9, hs
fsub.2d v11, v22, v10
mul x16, x10, x14
fmla.2d v11, v6, v7
umulh x10, x10, x14
add.2d v0, v0, v10
adds x9, x16, x9
cinc x10, x10, hs
adds x1, x9, x1
cinc x9, x10, hs
add.2d v4, v4, v11
mul x10, x11, x14
mov x16, #47492
umulh x11, x11, x14
movk x16, #23630, lsl 16
adds x9, x10, x9
cinc x10, x11, hs
movk x16, #49985, lsl 32
adds x6, x9, x6
cinc x9, x10, hs
movk x16, #17168, lsl 48
mul x10, x13, x14
dup.2d v7, x16
umulh x11, x13, x14
mov.16b v10, v21
adds x9, x10, x9
cinc x10, x11, hs
adds x3, x9, x3
cinc x9, x10, hs
fmla.2d v10, v6, v7
add x7, x7, x9
fsub.2d v11, v22, v10
mov x9, #61005
fmla.2d v11, v6, v7
movk x9, #58262, lsl 16
add.2d v1, v1, v10
movk x9, #32851, lsl 32
add.2d v0, v0, v11
movk x9, #11582, lsl 48
mov x10, #57936
mov x11, #37581
movk x10, #54828, lsl 16
movk x11, #43836, lsl 16
movk x11, #36286, lsl 32
movk x10, #18292, lsl 32
movk x11, #51783, lsl 48
movk x10, #17197, lsl 48
mov x13, #10899
dup.2d v7, x10
movk x13, #30709, lsl 16
mov.16b v10, v21
movk x13, #61551, lsl 32
fmla.2d v10, v6, v7
movk x13, #45784, lsl 48
fsub.2d v11, v22, v10
mov x10, #36612
fmla.2d v11, v6, v7
movk x10, #63402, lsl 16
movk x10, #47623, lsl 32
add.2d v2, v2, v10
movk x10, #9430, lsl 48
add.2d v1, v1, v11
mul x14, x9, x15
mov x16, #17708
umulh x9, x9, x15
movk x16, #43915, lsl 16
adds x12, x14, x12
cinc x9, x9, hs
movk x16, #64348, lsl 32
mul x14, x11, x15
movk x16, #17188, lsl 48
umulh x11, x11, x15
dup.2d v7, x16
adds x9, x14, x9
cinc x11, x11, hs
adds x1, x9, x1
cinc x9, x11, hs
mov.16b v10, v21
mul x11, x13, x15
fmla.2d v10, v6, v7
umulh x13, x13, x15
fsub.2d v11, v22, v10
adds x9, x11, x9
cinc x11, x13, hs
fmla.2d v11, v6, v7
adds x6, x9, x6
cinc x9, x11, hs
add.2d v5, v5, v10
mul x11, x10, x15
add.2d v2, v2, v11
umulh x10, x10, x15
mov x13, #29184
adds x9, x11, x9
cinc x10, x10, hs
adds x3, x9, x3
cinc x9, x10, hs
movk x13, #20789, lsl 16
add x7, x7, x9
movk x13, #19197, lsl 32
mov x9, #65535
movk x13, #17083, lsl 48
movk x9, #61439, lsl 16
dup.2d v7, x13
movk x9, #62867, lsl 32
mov.16b v10, v21
movk x9, #49889, lsl 48
fmla.2d v10, v6, v7
mul x9, x9, x12
fsub.2d v11, v22, v10
mov x10, #1
movk x10, #61440, lsl 16
fmla.2d v11, v6, v7
movk x10, #62867, lsl 32
add.2d v3, v3, v10
movk x10, #17377, lsl 48
add.2d v5, v5, v11
mov x11, #28817
ucvtf.2d v6, v9
movk x11, #31161, lsl 16
mov x13, #58856
movk x11, #59464, lsl 32
movk x13, #14953, lsl 16
movk x11, #10291, lsl 48
movk x13, #15155, lsl 32
mov x14, #22621
movk x14, #33153, lsl 16
movk x13, #17181, lsl 48
movk x14, #17846, lsl 32
dup.2d v7, x13
movk x14, #47184, lsl 48
mov.16b v9, v21
mov x13, #41001
fmla.2d v9, v6, v7
movk x13, #57649, lsl 16
fsub.2d v10, v22, v9
movk x13, #20082, lsl 32
fmla.2d v10, v6, v7
movk x13, #12388, lsl 48
add.2d v0, v0, v9
mul x15, x10, x9
umulh x10, x10, x9
add.2d v4, v4, v10
cmn x15, x12
cinc x10, x10, hs
mov x12, #35392
mul x15, x11, x9
movk x12, #12477, lsl 16
umulh x11, x11, x9
movk x12, #56780, lsl 32
adds x10, x15, x10
cinc x11, x11, hs
movk x12, #17142, lsl 48
adds x1, x10, x1
cinc x10, x11, hs
dup.2d v7, x12
mul x11, x14, x9
mov.16b v9, v21
umulh x12, x14, x9
adds x10, x11, x10
cinc x11, x12, hs
fmla.2d v9, v6, v7
adds x6, x10, x6
cinc x10, x11, hs
fsub.2d v10, v22, v9
mul x11, x13, x9
fmla.2d v10, v6, v7
umulh x9, x13, x9
add.2d v1, v1, v9
adds x10, x11, x10
cinc x9, x9, hs
add.2d v0, v0, v10
adds x3, x10, x3
cinc x9, x9, hs
mov x10, #9848
add x7, x7, x9
movk x10, #54501, lsl 16
mov x9, #2
movk x9, #57344, lsl 16
movk x10, #31540, lsl 32
movk x9, #60199, lsl 32
movk x10, #17170, lsl 48
movk x9, #34755, lsl 48
dup.2d v7, x10
mov x10, #57634
mov.16b v9, v21
movk x10, #62322, lsl 16
fmla.2d v9, v6, v7
movk x10, #53392, lsl 32
fsub.2d v10, v22, v9
movk x10, #20583, lsl 48
fmla.2d v10, v6, v7
mov x11, #45242
movk x11, #770, lsl 16
add.2d v2, v2, v9
movk x11, #35693, lsl 32
add.2d v1, v1, v10
movk x11, #28832, lsl 48
mov x12, #9584
mov x13, #16467
movk x12, #63883, lsl 16
movk x13, #49763, lsl 16
movk x12, #18253, lsl 32
movk x13, #40165, lsl 32
movk x12, #17190, lsl 48
movk x13, #24776, lsl 48
dup.2d v7, x12
subs x9, x1, x9
sbcs x10, x6, x10
sbcs x11, x3, x11
sbcs x12, x7, x13
tst x7, #9223372036854775808
csel x1, x9, x1, mi
csel x6, x10, x6, mi
csel x3, x11, x3, mi
csel x7, x12, x7, mi
mov.16b v9, v21
stp x1, x6, [x2, #0]
fmla.2d v9, v6, v7
stp x3, x7, [x2, #16]
fsub.2d v10, v22, v9
ldp x1, x3, [x4, #0]
fmla.2d v10, v6, v7
ldp x6, x7, [x4, #16]
add.2d v5, v5, v9
ldp x9, x10, [x5, #0]
add.2d v2, v2, v10
ldp x5, x11, [x5, #16]
mov x12, #51712
mul x13, x1, x9
umulh x14, x1, x9
movk x12, #16093, lsl 16
mul x15, x3, x9
movk x12, #30633, lsl 32
umulh x16, x3, x9
movk x12, #17068, lsl 48
adds x14, x15, x14
cinc x15, x16, hs
dup.2d v7, x12
mul x12, x6, x9
mov.16b v9, v21
umulh x16, x6, x9
fmla.2d v9, v6, v7
adds x12, x12, x15
cinc x15, x16, hs
fsub.2d v10, v22, v9
mul x16, x7, x9
umulh x9, x7, x9
fmla.2d v10, v6, v7
adds x15, x16, x15
cinc x9, x9, hs
add.2d v3, v3, v9
mul x16, x1, x10
add.2d v5, v5, v10
umulh x17, x1, x10
ucvtf.2d v6, v8
adds x14, x16, x14
cinc x16, x17, hs
mov x17, #34724
mul x20, x3, x10
movk x17, #40393, lsl 16
umulh x21, x3, x10
movk x17, #23752, lsl 32
adds x16, x20, x16
cinc x20, x21, hs
adds x12, x16, x12
cinc x16, x20, hs
movk x17, #17184, lsl 48
mul x20, x6, x10
dup.2d v7, x17
umulh x17, x6, x10
mov.16b v8, v21
adds x16, x20, x16
cinc x17, x17, hs
fmla.2d v8, v6, v7
adds x15, x16, x15
cinc x16, x17, hs
fsub.2d v9, v22, v8
mul x17, x7, x10
fmla.2d v9, v6, v7
umulh x10, x7, x10
add.2d v0, v0, v8
adds x16, x17, x16
cinc x10, x10, hs
adds x9, x16, x9
cinc x10, x10, hs
add.2d v4, v4, v9
mul x16, x1, x5
mov x17, #25532
umulh x20, x1, x5
movk x17, #31025, lsl 16
adds x12, x16, x12
cinc x16, x20, hs
movk x17, #10002, lsl 32
mul x20, x3, x5
movk x17, #17199, lsl 48
umulh x21, x3, x5
dup.2d v7, x17
adds x16, x20, x16
cinc x17, x21, hs
mov.16b v8, v21
adds x15, x16, x15
cinc x16, x17, hs
mul x17, x6, x5
fmla.2d v8, v6, v7
umulh x20, x6, x5
fsub.2d v9, v22, v8
adds x16, x17, x16
cinc x17, x20, hs
fmla.2d v9, v6, v7
adds x9, x16, x9
cinc x16, x17, hs
add.2d v1, v1, v8
mul x17, x7, x5
add.2d v0, v0, v9
umulh x5, x7, x5
mov x20, #18830
adds x16, x17, x16
cinc x5, x5, hs
movk x20, #2465, lsl 16
adds x10, x16, x10
cinc x5, x5, hs
mul x16, x1, x11
movk x20, #36348, lsl 32
umulh x1, x1, x11
movk x20, #17194, lsl 48
adds x15, x16, x15
cinc x1, x1, hs
dup.2d v7, x20
mul x16, x3, x11
mov.16b v8, v21
umulh x3, x3, x11
fmla.2d v8, v6, v7
adds x1, x16, x1
cinc x3, x3, hs
fsub.2d v9, v22, v8
adds x1, x1, x9
cinc x3, x3, hs
fmla.2d v9, v6, v7
mul x9, x6, x11
umulh x6, x6, x11
add.2d v2, v2, v8
adds x3, x9, x3
cinc x6, x6, hs
add.2d v1, v1, v9
adds x3, x3, x10
cinc x6, x6, hs
mov x9, #21566
mul x10, x7, x11
movk x9, #43708, lsl 16
umulh x7, x7, x11
movk x9, #57685, lsl 32
adds x6, x10, x6
cinc x7, x7, hs
movk x9, #17185, lsl 48
adds x5, x6, x5
cinc x6, x7, hs
dup.2d v7, x9
mov x7, #48718
movk x7, #4732, lsl 16
mov.16b v8, v21
movk x7, #45078, lsl 32
fmla.2d v8, v6, v7
movk x7, #39852, lsl 48
fsub.2d v9, v22, v8
mov x9, #16676
fmla.2d v9, v6, v7
movk x9, #12692, lsl 16
add.2d v5, v5, v8
movk x9, #20986, lsl 32
add.2d v2, v2, v9
movk x9, #2848, lsl 48
mov x10, #3072
mov x11, #51052
movk x11, #24721, lsl 16
movk x10, #8058, lsl 16
movk x11, #61092, lsl 32
movk x10, #46097, lsl 32
movk x11, #45156, lsl 48
movk x10, #17047, lsl 48
mov x16, #3197
dup.2d v7, x10
movk x16, #18936, lsl 16
mov.16b v8, v21
movk x16, #10922, lsl 32
fmla.2d v8, v6, v7
movk x16, #11014, lsl 48
fsub.2d v9, v22, v8
mul x10, x7, x13
umulh x7, x7, x13
fmla.2d v9, v6, v7
adds x10, x10, x15
cinc x7, x7, hs
add.2d v3, v3, v8
mul x15, x9, x13
add.2d v5, v5, v9
umulh x9, x9, x13
mov x17, #65535
adds x7, x15, x7
cinc x9, x9, hs
movk x17, #61439, lsl 16
adds x1, x7, x1
cinc x7, x9, hs
movk x17, #62867, lsl 32
mul x9, x11, x13
movk x17, #1, lsl 48
umulh x11, x11, x13
adds x7, x9, x7
cinc x9, x11, hs
umov x11, v4.d[0]
adds x3, x7, x3
cinc x7, x9, hs
umov x9, v4.d[1]
mul x15, x16, x13
mul x11, x11, x17
umulh x13, x16, x13
mul x9, x9, x17
adds x7, x15, x7
cinc x13, x13, hs
and x11, x11, x8
adds x5, x7, x5
cinc x7, x13, hs
and x8, x9, x8
add x6, x6, x7
ins v6.d[0], x11
ins v6.d[1], x8
mov x7, #56431
movk x7, #30457, lsl 16
ucvtf.2d v6, v6
movk x7, #30012, lsl 32
mov x8, #16
movk x7, #6382, lsl 48
movk x8, #22847, lsl 32
mov x9, #59151
movk x8, #17151, lsl 48
movk x9, #41769, lsl 16
dup.2d v7, x8
movk x9, #32276, lsl 32
mov.16b v8, v21
movk x9, #21677, lsl 48
fmla.2d v8, v6, v7
mov x8, #34015
movk x8, #20342, lsl 16
fsub.2d v9, v22, v8
movk x8, #13935, lsl 32
fmla.2d v9, v6, v7
movk x8, #11030, lsl 48
add.2d v0, v0, v8
mov x11, #13689
add.2d v4, v4, v9
movk x11, #8159, lsl 16
mov x13, #20728
movk x11, #215, lsl 32
movk x13, #23588, lsl 16
movk x11, #4913, lsl 48
movk x13, #7790, lsl 32
mul x15, x7, x14
umulh x7, x7, x14
movk x13, #17170, lsl 48
adds x10, x15, x10
cinc x7, x7, hs
dup.2d v7, x13
mul x13, x9, x14
mov.16b v8, v21
umulh x9, x9, x14
fmla.2d v8, v6, v7
adds x7, x13, x7
cinc x9, x9, hs
fsub.2d v9, v22, v8
adds x1, x7, x1
cinc x7, x9, hs
fmla.2d v9, v6, v7
mul x9, x8, x14
add.2d v1, v1, v8
umulh x8, x8, x14
adds x7, x9, x7
cinc x8, x8, hs
add.2d v0, v0, v9
adds x3, x7, x3
cinc x7, x8, hs
mov x8, #16000
mul x9, x11, x14
movk x8, #53891, lsl 16
umulh x11, x11, x14
movk x8, #5509, lsl 32
adds x7, x9, x7
cinc x9, x11, hs
movk x8, #17144, lsl 48
adds x5, x7, x5
cinc x7, x9, hs
dup.2d v7, x8
add x6, x6, x7
mov.16b v8, v21
mov x7, #61005
movk x7, #58262, lsl 16
fmla.2d v8, v6, v7
movk x7, #32851, lsl 32
fsub.2d v9, v22, v8
movk x7, #11582, lsl 48
fmla.2d v9, v6, v7
mov x8, #37581
add.2d v2, v2, v8
movk x8, #43836, lsl 16
add.2d v1, v1, v9
movk x8, #36286, lsl 32
mov x9, #46800
movk x8, #51783, lsl 48
movk x9, #2568, lsl 16
mov x11, #10899
movk x11, #30709, lsl 16
movk x9, #1335, lsl 32
movk x11, #61551, lsl 32
movk x9, #17188, lsl 48
movk x11, #45784, lsl 48
dup.2d v7, x9
mov x9, #36612
mov.16b v8, v21
movk x9, #63402, lsl 16
fmla.2d v8, v6, v7
movk x9, #47623, lsl 32
fsub.2d v9, v22, v8
movk x9, #9430, lsl 48
fmla.2d v9, v6, v7
mul x13, x7, x12
umulh x7, x7, x12
add.2d v5, v5, v8
adds x10, x13, x10
cinc x7, x7, hs
add.2d v2, v2, v9
mul x13, x8, x12
mov x14, #39040
umulh x8, x8, x12
movk x14, #14704, lsl 16
adds x7, x13, x7
cinc x8, x8, hs
movk x14, #12839, lsl 32
adds x1, x7, x1
cinc x7, x8, hs
movk x14, #17096, lsl 48
mul x8, x11, x12
dup.2d v7, x14
umulh x11, x11, x12
adds x7, x8, x7
cinc x8, x11, hs
mov.16b v8, v21
adds x3, x7, x3
cinc x7, x8, hs
fmla.2d v8, v6, v7
mul x8, x9, x12
fsub.2d v9, v22, v8
umulh x9, x9, x12
fmla.2d v9, v6, v7
adds x7, x8, x7
cinc x8, x9, hs
add.2d v3, v3, v8
adds x5, x7, x5
cinc x7, x8, hs
add.2d v5, v5, v9
add x6, x6, x7
mov x7, #140737488355328
mov x8, #65535
movk x8, #61439, lsl 16
dup.2d v6, x7
movk x8, #62867, lsl 32
and.16b v6, v3, v6
movk x8, #49889, lsl 48
cmeq.2d v6, v6, #0
mul x7, x8, x10
mov x8, #2
mov x9, #1
movk x8, #57344, lsl 16
movk x9, #61440, lsl 16
movk x8, #60199, lsl 32
movk x9, #62867, lsl 32
movk x8, #3, lsl 48
movk x9, #17377, lsl 48
mov x11, #28817
dup.2d v7, x8
movk x11, #31161, lsl 16
bic.16b v7, v7, v6
movk x11, #59464, lsl 32
mov x8, #10364
movk x11, #10291, lsl 48
movk x8, #11794, lsl 16
mov x12, #22621
movk x8, #3895, lsl 32
movk x12, #33153, lsl 16
movk x8, #9, lsl 48
movk x12, #17846, lsl 32
dup.2d v8, x8
movk x12, #47184, lsl 48
mov x8, #41001
bic.16b v8, v8, v6
movk x8, #57649, lsl 16
mov x13, #26576
movk x8, #20082, lsl 32
movk x13, #47696, lsl 16
movk x8, #12388, lsl 48
movk x13, #688, lsl 32
mul x14, x9, x7
movk x13, #3, lsl 48
umulh x9, x9, x7
dup.2d v9, x13
cmn x14, x10
cinc x9, x9, hs
bic.16b v9, v9, v6
mul x10, x11, x7
umulh x11, x11, x7
mov x13, #46800
adds x9, x10, x9
cinc x10, x11, hs
movk x13, #2568, lsl 16
adds x1, x9, x1
cinc x9, x10, hs
movk x13, #1335, lsl 32
mul x10, x12, x7
movk x13, #4, lsl 48
umulh x11, x12, x7
dup.2d v10, x13
adds x9, x10, x9
cinc x10, x11, hs
bic.16b v10, v10, v6
adds x3, x9, x3
cinc x9, x10, hs
mov x10, #49763
mul x11, x8, x7
umulh x7, x8, x7
movk x10, #40165, lsl 16
adds x8, x11, x9
cinc x7, x7, hs
movk x10, #24776, lsl 32
adds x5, x8, x5
cinc x7, x7, hs
dup.2d v11, x10
add x6, x6, x7
bic.16b v6, v11, v6
mov x7, #2
sub.2d v0, v0, v7
movk x7, #57344, lsl 16
ssra.2d v0, v4, #52
movk x7, #60199, lsl 32
sub.2d v4, v1, v8
movk x7, #34755, lsl 48
mov x8, #57634
ssra.2d v4, v0, #52
movk x8, #62322, lsl 16
sub.2d v7, v2, v9
movk x8, #53392, lsl 32
ssra.2d v7, v4, #52
movk x8, #20583, lsl 48
sub.2d v5, v5, v10
mov x9, #45242
ssra.2d v5, v7, #52
movk x9, #770, lsl 16
sub.2d v6, v3, v6
movk x9, #35693, lsl 32
ssra.2d v6, v5, #52
movk x9, #28832, lsl 48
mov x10, #16467
ushr.2d v1, v4, #12
movk x10, #49763, lsl 16
ushr.2d v2, v7, #24
movk x10, #40165, lsl 32
ushr.2d v3, v5, #36
movk x10, #24776, lsl 48
sli.2d v0, v4, #52
subs x7, x1, x7
sbcs x8, x3, x8
sbcs x9, x5, x9
sbcs x10, x6, x10
sli.2d v1, v7, #40
tst x6, #9223372036854775808
csel x1, x7, x1, mi
csel x3, x8, x3, mi
csel x5, x9, x5, mi
csel x6, x10, x6, mi
sli.2d v2, v5, #28
stp x1, x3, [x4, #0]
stp x5, x6, [x4, #16]
sli.2d v3, v6, #16
ret
|
xrvdg/modmulzoo
| 24,774
|
crates/modmul-asm/asm/single_step_interleaved_seq_scalar.s
|
//in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3],
//in("x4") b[0], in("x5") b[1], in("x6") b[2], in("x7") b[3],
//in("x8") a1[0], in("x9") a1[1], in("x10") a1[2], in("x11") a1[3],
//in("x12") b1[0], in("x13") b1[1], in("x14") b1[2], in("x15") b1[3],
//in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3],
//in("v4") bv[0], in("v5") bv[1], in("v6") bv[2], in("v7") bv[3],
//lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3],
//lateout("x4") out1[0], lateout("x5") out1[1], lateout("x6") out1[2], lateout("x7") out1[3],
//lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3],
//lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("x20") _, lateout("x21") _, lateout("x22") _, lateout("x23") _, lateout("x24") _, lateout("x25") _, lateout("x26") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, lateout("v20") _, lateout("v21") _, lateout("v22") _, lateout("v23") _, lateout("v24") _,
//lateout("lr") _
.global _single_step_interleaved_seq_scalar
.align 4
.text
_single_step_interleaved_seq_scalar:
mov x16, #4503599627370495
mul x17, x0, x4
dup.2d v8, x16
umulh x20, x0, x4
shl.2d v9, v1, #14
shl.2d v10, v2, #26
mul x21, x1, x4
shl.2d v11, v3, #38
umulh x22, x1, x4
ushr.2d v3, v3, #14
adds x20, x21, x20
cinc x21, x22, hs
shl.2d v12, v0, #2
usra.2d v9, v0, #50
mul x22, x2, x4
usra.2d v10, v1, #38
umulh x23, x2, x4
usra.2d v11, v2, #26
adds x21, x22, x21
cinc x22, x23, hs
and.16b v0, v12, v8
and.16b v1, v9, v8
mul x23, x3, x4
and.16b v2, v10, v8
umulh x4, x3, x4
and.16b v9, v11, v8
adds x22, x23, x22
cinc x4, x4, hs
shl.2d v10, v5, #14
shl.2d v11, v6, #26
mul x23, x0, x5
shl.2d v12, v7, #38
umulh x24, x0, x5
ushr.2d v7, v7, #14
adds x20, x23, x20
cinc x23, x24, hs
shl.2d v13, v4, #2
usra.2d v10, v4, #50
mul x24, x1, x5
usra.2d v11, v5, #38
umulh x25, x1, x5
usra.2d v12, v6, #26
adds x23, x24, x23
cinc x24, x25, hs
and.16b v4, v13, v8
and.16b v5, v10, v8
adds x21, x23, x21
cinc x23, x24, hs
and.16b v6, v11, v8
mul x24, x2, x5
and.16b v10, v12, v8
mov x25, #13605374474286268416
umulh x26, x2, x5
dup.2d v11, x25
adds x23, x24, x23
cinc x24, x26, hs
mov x25, #6440147467139809280
adds x22, x23, x22
cinc x23, x24, hs
dup.2d v12, x25
mov x24, #3688448094816436224
mul x25, x3, x5
dup.2d v13, x24
umulh x5, x3, x5
mov x24, #9209861237972664320
adds x23, x25, x23
cinc x5, x5, hs
dup.2d v14, x24
mov x24, #12218265789056155648
adds x4, x23, x4
cinc x5, x5, hs
dup.2d v15, x24
mul x23, x0, x6
mov x24, #17739678932212383744
umulh x25, x0, x6
dup.2d v16, x24
mov x24, #2301339409586323456
adds x21, x23, x21
cinc x23, x25, hs
dup.2d v17, x24
mul x24, x1, x6
mov x25, #7822752552742551552
umulh x26, x1, x6
dup.2d v18, x25
mov x25, #5071053180419178496
adds x23, x24, x23
cinc x24, x26, hs
dup.2d v19, x25
adds x22, x23, x22
cinc x23, x24, hs
mov x24, #16352570246982270976
mul x25, x2, x6
dup.2d v20, x24
mov x24, #5075556780046548992
umulh x26, x2, x6
dup.2d v21, x24
adds x23, x25, x23
cinc x24, x26, hs
mov x25, #1
adds x4, x23, x4
cinc x23, x24, hs
movk x25, #18032, lsl 48
dup.2d v22, x25
mul x24, x3, x6
ucvtf.2d v0, v0
umulh x6, x3, x6
ucvtf.2d v1, v1
ucvtf.2d v2, v2
adds x23, x24, x23
cinc x6, x6, hs
ucvtf.2d v9, v9
adds x5, x23, x5
cinc x6, x6, hs
ucvtf.2d v3, v3
mul x23, x0, x7
ucvtf.2d v4, v4
ucvtf.2d v5, v5
umulh x0, x0, x7
ucvtf.2d v6, v6
adds x22, x23, x22
cinc x0, x0, hs
ucvtf.2d v10, v10
mul x23, x1, x7
ucvtf.2d v7, v7
mov.16b v23, v21
umulh x1, x1, x7
fmla.2d v23, v0, v4
adds x0, x23, x0
cinc x1, x1, hs
fsub.2d v24, v22, v23
adds x0, x0, x4
cinc x1, x1, hs
fmla.2d v24, v0, v4
add.2d v13, v13, v23
mul x4, x2, x7
add.2d v11, v11, v24
umulh x2, x2, x7
mov.16b v23, v21
adds x1, x4, x1
cinc x2, x2, hs
fmla.2d v23, v0, v5
fsub.2d v24, v22, v23
adds x1, x1, x5
cinc x2, x2, hs
fmla.2d v24, v0, v5
mul x4, x3, x7
add.2d v15, v15, v23
umulh x3, x3, x7
add.2d v13, v13, v24
mov.16b v23, v21
adds x2, x4, x2
cinc x3, x3, hs
fmla.2d v23, v0, v6
adds x2, x2, x6
cinc x3, x3, hs
fsub.2d v24, v22, v23
mov x4, #48718
fmla.2d v24, v0, v6
add.2d v17, v17, v23
movk x4, #4732, lsl 16
add.2d v15, v15, v24
movk x4, #45078, lsl 32
mov.16b v23, v21
fmla.2d v23, v0, v10
movk x4, #39852, lsl 48
fsub.2d v24, v22, v23
mov x5, #16676
fmla.2d v24, v0, v10
movk x5, #12692, lsl 16
add.2d v19, v19, v23
add.2d v17, v17, v24
movk x5, #20986, lsl 32
mov.16b v23, v21
movk x5, #2848, lsl 48
fmla.2d v23, v0, v7
mov x6, #51052
fsub.2d v24, v22, v23
fmla.2d v24, v0, v7
movk x6, #24721, lsl 16
add.2d v0, v20, v23
movk x6, #61092, lsl 32
add.2d v19, v19, v24
movk x6, #45156, lsl 48
mov.16b v20, v21
fmla.2d v20, v1, v4
mov x7, #3197
fsub.2d v23, v22, v20
movk x7, #18936, lsl 16
fmla.2d v23, v1, v4
movk x7, #10922, lsl 32
add.2d v15, v15, v20
add.2d v13, v13, v23
movk x7, #11014, lsl 48
mov.16b v20, v21
mul x23, x4, x17
fmla.2d v20, v1, v5
umulh x4, x4, x17
fsub.2d v23, v22, v20
fmla.2d v23, v1, v5
adds x22, x23, x22
cinc x4, x4, hs
add.2d v17, v17, v20
mul x23, x5, x17
add.2d v15, v15, v23
umulh x5, x5, x17
mov.16b v20, v21
fmla.2d v20, v1, v6
adds x4, x23, x4
cinc x5, x5, hs
fsub.2d v23, v22, v20
adds x0, x4, x0
cinc x4, x5, hs
fmla.2d v23, v1, v6
add.2d v19, v19, v20
mul x5, x6, x17
add.2d v17, v17, v23
umulh x6, x6, x17
mov.16b v20, v21
adds x4, x5, x4
cinc x5, x6, hs
fmla.2d v20, v1, v10
fsub.2d v23, v22, v20
adds x1, x4, x1
cinc x4, x5, hs
fmla.2d v23, v1, v10
mul x5, x7, x17
add.2d v0, v0, v20
umulh x6, x7, x17
add.2d v19, v19, v23
mov.16b v20, v21
adds x4, x5, x4
cinc x5, x6, hs
fmla.2d v20, v1, v7
adds x2, x4, x2
cinc x4, x5, hs
fsub.2d v23, v22, v20
add x3, x3, x4
fmla.2d v23, v1, v7
add.2d v1, v18, v20
mov x4, #56431
add.2d v0, v0, v23
movk x4, #30457, lsl 16
mov.16b v18, v21
movk x4, #30012, lsl 32
fmla.2d v18, v2, v4
fsub.2d v20, v22, v18
movk x4, #6382, lsl 48
fmla.2d v20, v2, v4
mov x5, #59151
add.2d v17, v17, v18
movk x5, #41769, lsl 16
add.2d v15, v15, v20
mov.16b v18, v21
movk x5, #32276, lsl 32
fmla.2d v18, v2, v5
movk x5, #21677, lsl 48
fsub.2d v20, v22, v18
mov x6, #34015
fmla.2d v20, v2, v5
add.2d v18, v19, v18
movk x6, #20342, lsl 16
add.2d v17, v17, v20
movk x6, #13935, lsl 32
mov.16b v19, v21
fmla.2d v19, v2, v6
movk x6, #11030, lsl 48
fsub.2d v20, v22, v19
mov x7, #13689
fmla.2d v20, v2, v6
movk x7, #8159, lsl 16
add.2d v0, v0, v19
add.2d v18, v18, v20
movk x7, #215, lsl 32
mov.16b v19, v21
movk x7, #4913, lsl 48
fmla.2d v19, v2, v10
mul x17, x4, x20
fsub.2d v20, v22, v19
fmla.2d v20, v2, v10
umulh x4, x4, x20
add.2d v1, v1, v19
adds x17, x17, x22
cinc x4, x4, hs
add.2d v0, v0, v20
mul x22, x5, x20
mov.16b v19, v21
fmla.2d v19, v2, v7
umulh x5, x5, x20
fsub.2d v20, v22, v19
adds x4, x22, x4
cinc x5, x5, hs
fmla.2d v20, v2, v7
adds x0, x4, x0
cinc x4, x5, hs
add.2d v2, v16, v19
add.2d v1, v1, v20
mul x5, x6, x20
mov.16b v16, v21
umulh x6, x6, x20
fmla.2d v16, v9, v4
adds x4, x5, x4
cinc x5, x6, hs
fsub.2d v19, v22, v16
fmla.2d v19, v9, v4
adds x1, x4, x1
cinc x4, x5, hs
add.2d v16, v18, v16
mul x5, x7, x20
add.2d v17, v17, v19
umulh x6, x7, x20
mov.16b v18, v21
fmla.2d v18, v9, v5
adds x4, x5, x4
cinc x5, x6, hs
fsub.2d v19, v22, v18
adds x2, x4, x2
cinc x4, x5, hs
fmla.2d v19, v9, v5
add.2d v0, v0, v18
add x3, x3, x4
add.2d v16, v16, v19
mov x4, #61005
mov.16b v18, v21
movk x4, #58262, lsl 16
fmla.2d v18, v9, v6
fsub.2d v19, v22, v18
movk x4, #32851, lsl 32
fmla.2d v19, v9, v6
movk x4, #11582, lsl 48
add.2d v1, v1, v18
mov x5, #37581
add.2d v0, v0, v19
mov.16b v18, v21
movk x5, #43836, lsl 16
fmla.2d v18, v9, v10
movk x5, #36286, lsl 32
fsub.2d v19, v22, v18
movk x5, #51783, lsl 48
fmla.2d v19, v9, v10
add.2d v2, v2, v18
mov x6, #10899
add.2d v1, v1, v19
movk x6, #30709, lsl 16
mov.16b v18, v21
movk x6, #61551, lsl 32
fmla.2d v18, v9, v7
fsub.2d v19, v22, v18
movk x6, #45784, lsl 48
fmla.2d v19, v9, v7
mov x7, #36612
add.2d v9, v14, v18
movk x7, #63402, lsl 16
add.2d v2, v2, v19
mov.16b v14, v21
movk x7, #47623, lsl 32
fmla.2d v14, v3, v4
movk x7, #9430, lsl 48
fsub.2d v18, v22, v14
mul x20, x4, x21
fmla.2d v18, v3, v4
add.2d v0, v0, v14
umulh x4, x4, x21
add.2d v4, v16, v18
adds x17, x20, x17
cinc x4, x4, hs
mov.16b v14, v21
fmla.2d v14, v3, v5
mul x20, x5, x21
fsub.2d v16, v22, v14
umulh x5, x5, x21
fmla.2d v16, v3, v5
adds x4, x20, x4
cinc x5, x5, hs
add.2d v1, v1, v14
add.2d v0, v0, v16
adds x0, x4, x0
cinc x4, x5, hs
mov.16b v5, v21
mul x5, x6, x21
fmla.2d v5, v3, v6
umulh x6, x6, x21
fsub.2d v14, v22, v5
fmla.2d v14, v3, v6
adds x4, x5, x4
cinc x5, x6, hs
add.2d v2, v2, v5
adds x1, x4, x1
cinc x4, x5, hs
add.2d v1, v1, v14
mul x5, x7, x21
mov.16b v5, v21
fmla.2d v5, v3, v10
umulh x6, x7, x21
fsub.2d v6, v22, v5
adds x4, x5, x4
cinc x5, x6, hs
fmla.2d v6, v3, v10
adds x2, x4, x2
cinc x4, x5, hs
add.2d v5, v9, v5
add.2d v2, v2, v6
add x3, x3, x4
mov.16b v6, v21
mov x4, #65535
fmla.2d v6, v3, v7
movk x4, #61439, lsl 16
fsub.2d v9, v22, v6
fmla.2d v9, v3, v7
movk x4, #62867, lsl 32
add.2d v3, v12, v6
movk x4, #49889, lsl 48
add.2d v5, v5, v9
mul x4, x4, x17
usra.2d v13, v11, #52
usra.2d v15, v13, #52
mov x5, #1
usra.2d v17, v15, #52
movk x5, #61440, lsl 16
usra.2d v4, v17, #52
and.16b v6, v11, v8
movk x5, #62867, lsl 32
and.16b v7, v13, v8
movk x5, #17377, lsl 48
and.16b v9, v15, v8
mov x6, #28817
and.16b v8, v17, v8
ucvtf.2d v6, v6
movk x6, #31161, lsl 16
mov x7, #37864
movk x6, #59464, lsl 32
movk x7, #1815, lsl 16
movk x6, #10291, lsl 48
movk x7, #28960, lsl 32
movk x7, #17153, lsl 48
mov x20, #22621
dup.2d v10, x7
movk x20, #33153, lsl 16
mov.16b v11, v21
movk x20, #17846, lsl 32
fmla.2d v11, v6, v10
fsub.2d v12, v22, v11
movk x20, #47184, lsl 48
fmla.2d v12, v6, v10
mov x7, #41001
add.2d v0, v0, v11
movk x7, #57649, lsl 16
add.2d v4, v4, v12
mov x21, #46128
movk x7, #20082, lsl 32
movk x21, #29964, lsl 16
movk x7, #12388, lsl 48
movk x21, #7587, lsl 32
mul x22, x5, x4
movk x21, #17161, lsl 48
dup.2d v10, x21
umulh x5, x5, x4
mov.16b v11, v21
cmn x22, x17
cinc x5, x5, hs
fmla.2d v11, v6, v10
mul x17, x6, x4
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
umulh x6, x6, x4
add.2d v1, v1, v11
adds x5, x17, x5
cinc x6, x6, hs
add.2d v0, v0, v12
mov x17, #52826
adds x0, x5, x0
cinc x5, x6, hs
movk x17, #57790, lsl 16
mul x6, x20, x4
movk x17, #55431, lsl 32
umulh x20, x20, x4
movk x17, #17196, lsl 48
dup.2d v10, x17
adds x5, x6, x5
cinc x6, x20, hs
mov.16b v11, v21
adds x1, x5, x1
cinc x5, x6, hs
fmla.2d v11, v6, v10
mul x6, x7, x4
fsub.2d v12, v22, v11
fmla.2d v12, v6, v10
umulh x4, x7, x4
add.2d v2, v2, v11
adds x5, x6, x5
cinc x4, x4, hs
add.2d v1, v1, v12
adds x2, x5, x2
cinc x4, x4, hs
mov x5, #31276
movk x5, #21262, lsl 16
add x3, x3, x4
movk x5, #2304, lsl 32
mov x4, #2
movk x5, #17182, lsl 48
movk x4, #57344, lsl 16
dup.2d v10, x5
mov.16b v11, v21
movk x4, #60199, lsl 32
fmla.2d v11, v6, v10
movk x4, #34755, lsl 48
fsub.2d v12, v22, v11
mov x5, #57634
fmla.2d v12, v6, v10
add.2d v5, v5, v11
movk x5, #62322, lsl 16
add.2d v2, v2, v12
movk x5, #53392, lsl 32
mov x6, #28672
movk x5, #20583, lsl 48
movk x6, #24515, lsl 16
movk x6, #54929, lsl 32
mov x7, #45242
movk x6, #17064, lsl 48
movk x7, #770, lsl 16
dup.2d v10, x6
mov.16b v11, v21
movk x7, #35693, lsl 32
fmla.2d v11, v6, v10
movk x7, #28832, lsl 48
fsub.2d v12, v22, v11
mov x6, #16467
fmla.2d v12, v6, v10
add.2d v3, v3, v11
movk x6, #49763, lsl 16
add.2d v5, v5, v12
movk x6, #40165, lsl 32
ucvtf.2d v6, v7
movk x6, #24776, lsl 48
mov x17, #44768
movk x17, #51919, lsl 16
subs x4, x0, x4
sbcs x5, x1, x5
sbcs x7, x2, x7
sbcs x6, x3, x6
movk x17, #6346, lsl 32
tst x3, #9223372036854775808
csel x0, x4, x0, mi
csel x1, x5, x1, mi
csel x2, x7, x2, mi
csel x3, x6, x3, mi
movk x17, #17133, lsl 48
mul x4, x8, x12
dup.2d v7, x17
mov.16b v10, v21
umulh x5, x8, x12
fmla.2d v10, v6, v7
mul x6, x9, x12
fsub.2d v11, v22, v10
umulh x7, x9, x12
fmla.2d v11, v6, v7
add.2d v0, v0, v10
adds x5, x6, x5
cinc x6, x7, hs
add.2d v4, v4, v11
mul x7, x10, x12
mov x17, #47492
umulh x20, x10, x12
movk x17, #23630, lsl 16
movk x17, #49985, lsl 32
adds x6, x7, x6
cinc x7, x20, hs
movk x17, #17168, lsl 48
mul x20, x11, x12
dup.2d v7, x17
mov.16b v10, v21
umulh x12, x11, x12
fmla.2d v10, v6, v7
adds x7, x20, x7
cinc x12, x12, hs
fsub.2d v11, v22, v10
mul x17, x8, x13
fmla.2d v11, v6, v7
add.2d v1, v1, v10
umulh x20, x8, x13
add.2d v0, v0, v11
adds x5, x17, x5
cinc x17, x20, hs
mov x20, #57936
mul x21, x9, x13
movk x20, #54828, lsl 16
movk x20, #18292, lsl 32
umulh x22, x9, x13
movk x20, #17197, lsl 48
adds x17, x21, x17
cinc x21, x22, hs
dup.2d v7, x20
adds x6, x17, x6
cinc x17, x21, hs
mov.16b v10, v21
fmla.2d v10, v6, v7
mul x20, x10, x13
fsub.2d v11, v22, v10
umulh x21, x10, x13
fmla.2d v11, v6, v7
adds x17, x20, x17
cinc x20, x21, hs
add.2d v2, v2, v10
add.2d v1, v1, v11
adds x7, x17, x7
cinc x17, x20, hs
mov x20, #17708
mul x21, x11, x13
movk x20, #43915, lsl 16
umulh x13, x11, x13
movk x20, #64348, lsl 32
movk x20, #17188, lsl 48
adds x17, x21, x17
cinc x13, x13, hs
dup.2d v7, x20
adds x12, x17, x12
cinc x13, x13, hs
mov.16b v10, v21
mul x17, x8, x14
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
umulh x20, x8, x14
fmla.2d v11, v6, v7
adds x6, x17, x6
cinc x17, x20, hs
add.2d v5, v5, v10
add.2d v2, v2, v11
mul x20, x9, x14
mov x21, #29184
umulh x22, x9, x14
movk x21, #20789, lsl 16
adds x17, x20, x17
cinc x20, x22, hs
movk x21, #19197, lsl 32
movk x21, #17083, lsl 48
adds x7, x17, x7
cinc x17, x20, hs
dup.2d v7, x21
mul x20, x10, x14
mov.16b v10, v21
umulh x21, x10, x14
fmla.2d v10, v6, v7
fsub.2d v11, v22, v10
adds x17, x20, x17
cinc x20, x21, hs
fmla.2d v11, v6, v7
adds x12, x17, x12
cinc x17, x20, hs
add.2d v3, v3, v10
mul x20, x11, x14
add.2d v5, v5, v11
ucvtf.2d v6, v9
umulh x14, x11, x14
mov x21, #58856
adds x17, x20, x17
cinc x14, x14, hs
movk x21, #14953, lsl 16
adds x13, x17, x13
cinc x14, x14, hs
movk x21, #15155, lsl 32
movk x21, #17181, lsl 48
mul x17, x8, x15
dup.2d v7, x21
umulh x8, x8, x15
mov.16b v9, v21
adds x7, x17, x7
cinc x8, x8, hs
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
mul x17, x9, x15
fmla.2d v10, v6, v7
umulh x9, x9, x15
add.2d v0, v0, v9
adds x8, x17, x8
cinc x9, x9, hs
add.2d v4, v4, v10
mov x17, #35392
adds x8, x8, x12
cinc x9, x9, hs
movk x17, #12477, lsl 16
mul x12, x10, x15
movk x17, #56780, lsl 32
movk x17, #17142, lsl 48
umulh x10, x10, x15
dup.2d v7, x17
adds x9, x12, x9
cinc x10, x10, hs
mov.16b v9, v21
adds x9, x9, x13
cinc x10, x10, hs
fmla.2d v9, v6, v7
fsub.2d v10, v22, v9
mul x12, x11, x15
fmla.2d v10, v6, v7
umulh x11, x11, x15
add.2d v1, v1, v9
adds x10, x12, x10
cinc x11, x11, hs
add.2d v0, v0, v10
mov x12, #9848
adds x10, x10, x14
cinc x11, x11, hs
movk x12, #54501, lsl 16
mov x13, #48718
movk x12, #31540, lsl 32
movk x13, #4732, lsl 16
movk x12, #17170, lsl 48
dup.2d v7, x12
movk x13, #45078, lsl 32
mov.16b v9, v21
movk x13, #39852, lsl 48
fmla.2d v9, v6, v7
mov x12, #16676
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
movk x12, #12692, lsl 16
add.2d v2, v2, v9
movk x12, #20986, lsl 32
add.2d v1, v1, v10
movk x12, #2848, lsl 48
mov x14, #9584
movk x14, #63883, lsl 16
mov x15, #51052
movk x14, #18253, lsl 32
movk x15, #24721, lsl 16
movk x14, #17190, lsl 48
movk x15, #61092, lsl 32
dup.2d v7, x14
mov.16b v9, v21
movk x15, #45156, lsl 48
fmla.2d v9, v6, v7
mov x14, #3197
fsub.2d v10, v22, v9
fmla.2d v10, v6, v7
movk x14, #18936, lsl 16
add.2d v5, v5, v9
movk x14, #10922, lsl 32
add.2d v2, v2, v10
movk x14, #11014, lsl 48
mov x17, #51712
movk x17, #16093, lsl 16
mul x20, x13, x4
movk x17, #30633, lsl 32
umulh x13, x13, x4
movk x17, #17068, lsl 48
adds x7, x20, x7
cinc x13, x13, hs
dup.2d v7, x17
mov.16b v9, v21
mul x17, x12, x4
fmla.2d v9, v6, v7
umulh x12, x12, x4
fsub.2d v10, v22, v9
adds x13, x17, x13
cinc x12, x12, hs
fmla.2d v10, v6, v7
add.2d v3, v3, v9
adds x8, x13, x8
cinc x12, x12, hs
add.2d v5, v5, v10
mul x13, x15, x4
ucvtf.2d v6, v8
umulh x15, x15, x4
mov x17, #34724
movk x17, #40393, lsl 16
adds x12, x13, x12
cinc x13, x15, hs
movk x17, #23752, lsl 32
adds x9, x12, x9
cinc x12, x13, hs
movk x17, #17184, lsl 48
mul x13, x14, x4
dup.2d v7, x17
mov.16b v8, v21
umulh x4, x14, x4
fmla.2d v8, v6, v7
adds x12, x13, x12
cinc x4, x4, hs
fsub.2d v9, v22, v8
adds x10, x12, x10
cinc x4, x4, hs
fmla.2d v9, v6, v7
add.2d v0, v0, v8
add x4, x11, x4
add.2d v4, v4, v9
mov x11, #56431
mov x12, #25532
movk x12, #31025, lsl 16
movk x11, #30457, lsl 16
movk x12, #10002, lsl 32
movk x11, #30012, lsl 32
movk x12, #17199, lsl 48
movk x11, #6382, lsl 48
dup.2d v7, x12
mov.16b v8, v21
mov x12, #59151
fmla.2d v8, v6, v7
movk x12, #41769, lsl 16
fsub.2d v9, v22, v8
movk x12, #32276, lsl 32
fmla.2d v9, v6, v7
add.2d v1, v1, v8
movk x12, #21677, lsl 48
add.2d v0, v0, v9
mov x13, #34015
mov x14, #18830
movk x13, #20342, lsl 16
movk x14, #2465, lsl 16
movk x14, #36348, lsl 32
movk x13, #13935, lsl 32
movk x14, #17194, lsl 48
movk x13, #11030, lsl 48
dup.2d v7, x14
mov x14, #13689
mov.16b v8, v21
fmla.2d v8, v6, v7
movk x14, #8159, lsl 16
fsub.2d v9, v22, v8
movk x14, #215, lsl 32
fmla.2d v9, v6, v7
movk x14, #4913, lsl 48
add.2d v2, v2, v8
add.2d v1, v1, v9
mul x15, x11, x5
mov x17, #21566
umulh x11, x11, x5
movk x17, #43708, lsl 16
adds x7, x15, x7
cinc x11, x11, hs
movk x17, #57685, lsl 32
movk x17, #17185, lsl 48
mul x15, x12, x5
dup.2d v7, x17
umulh x12, x12, x5
mov.16b v8, v21
fmla.2d v8, v6, v7
adds x11, x15, x11
cinc x12, x12, hs
fsub.2d v9, v22, v8
adds x8, x11, x8
cinc x11, x12, hs
fmla.2d v9, v6, v7
mul x12, x13, x5
add.2d v5, v5, v8
add.2d v2, v2, v9
umulh x13, x13, x5
mov x15, #3072
adds x11, x12, x11
cinc x12, x13, hs
movk x15, #8058, lsl 16
adds x9, x11, x9
cinc x11, x12, hs
movk x15, #46097, lsl 32
movk x15, #17047, lsl 48
mul x12, x14, x5
dup.2d v7, x15
umulh x5, x14, x5
mov.16b v8, v21
adds x11, x12, x11
cinc x5, x5, hs
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
adds x10, x11, x10
cinc x5, x5, hs
fmla.2d v9, v6, v7
add x4, x4, x5
add.2d v3, v3, v8
mov x5, #61005
add.2d v5, v5, v9
mov x11, #65535
movk x5, #58262, lsl 16
movk x11, #61439, lsl 16
movk x5, #32851, lsl 32
movk x11, #62867, lsl 32
movk x5, #11582, lsl 48
movk x11, #1, lsl 48
umov x12, v4.d[0]
mov x13, #37581
umov x14, v4.d[1]
movk x13, #43836, lsl 16
mul x12, x12, x11
movk x13, #36286, lsl 32
mul x11, x14, x11
and x12, x12, x16
movk x13, #51783, lsl 48
and x11, x11, x16
mov x14, #10899
ins v6.d[0], x12
ins v6.d[1], x11
ucvtf.2d v6, v6
movk x14, #30709, lsl 16
mov x11, #16
movk x14, #61551, lsl 32
movk x11, #22847, lsl 32
movk x14, #45784, lsl 48
movk x11, #17151, lsl 48
dup.2d v7, x11
mov x11, #36612
mov.16b v8, v21
movk x11, #63402, lsl 16
fmla.2d v8, v6, v7
movk x11, #47623, lsl 32
fsub.2d v9, v22, v8
fmla.2d v9, v6, v7
movk x11, #9430, lsl 48
add.2d v0, v0, v8
mul x12, x5, x6
add.2d v4, v4, v9
umulh x5, x5, x6
mov x15, #20728
movk x15, #23588, lsl 16
adds x7, x12, x7
cinc x5, x5, hs
movk x15, #7790, lsl 32
mul x12, x13, x6
movk x15, #17170, lsl 48
umulh x13, x13, x6
dup.2d v7, x15
mov.16b v8, v21
adds x5, x12, x5
cinc x12, x13, hs
fmla.2d v8, v6, v7
adds x5, x5, x8
cinc x8, x12, hs
fsub.2d v9, v22, v8
mul x12, x14, x6
fmla.2d v9, v6, v7
add.2d v1, v1, v8
umulh x13, x14, x6
add.2d v0, v0, v9
adds x8, x12, x8
cinc x12, x13, hs
mov x13, #16000
adds x8, x8, x9
cinc x9, x12, hs
movk x13, #53891, lsl 16
movk x13, #5509, lsl 32
mul x12, x11, x6
movk x13, #17144, lsl 48
umulh x6, x11, x6
dup.2d v7, x13
mov.16b v8, v21
adds x9, x12, x9
cinc x6, x6, hs
fmla.2d v8, v6, v7
adds x9, x9, x10
cinc x6, x6, hs
fsub.2d v9, v22, v8
add x4, x4, x6
fmla.2d v9, v6, v7
add.2d v2, v2, v8
mov x6, #65535
add.2d v1, v1, v9
movk x6, #61439, lsl 16
mov x10, #46800
movk x6, #62867, lsl 32
movk x10, #2568, lsl 16
movk x10, #1335, lsl 32
movk x6, #49889, lsl 48
movk x10, #17188, lsl 48
mul x6, x6, x7
dup.2d v7, x10
mov x10, #1
mov.16b v8, v21
fmla.2d v8, v6, v7
movk x10, #61440, lsl 16
fsub.2d v9, v22, v8
movk x10, #62867, lsl 32
fmla.2d v9, v6, v7
movk x10, #17377, lsl 48
add.2d v5, v5, v8
add.2d v2, v2, v9
mov x11, #28817
mov x12, #39040
movk x11, #31161, lsl 16
movk x12, #14704, lsl 16
movk x11, #59464, lsl 32
movk x12, #12839, lsl 32
movk x12, #17096, lsl 48
movk x11, #10291, lsl 48
dup.2d v7, x12
mov x12, #22621
mov.16b v8, v21
movk x12, #33153, lsl 16
fmla.2d v8, v6, v7
fsub.2d v9, v22, v8
movk x12, #17846, lsl 32
fmla.2d v9, v6, v7
movk x12, #47184, lsl 48
add.2d v3, v3, v8
add.2d v5, v5, v9
mov x13, #41001
mov x14, #140737488355328
movk x13, #57649, lsl 16
dup.2d v6, x14
movk x13, #20082, lsl 32
and.16b v6, v3, v6
cmeq.2d v6, v6, #0
movk x13, #12388, lsl 48
mov x14, #2
mul x15, x10, x6
movk x14, #57344, lsl 16
umulh x10, x10, x6
movk x14, #60199, lsl 32
movk x14, #3, lsl 48
cmn x15, x7
cinc x10, x10, hs
dup.2d v7, x14
mul x7, x11, x6
bic.16b v7, v7, v6
umulh x11, x11, x6
mov x14, #10364
movk x14, #11794, lsl 16
adds x7, x7, x10
cinc x10, x11, hs
movk x14, #3895, lsl 32
adds x5, x7, x5
cinc x7, x10, hs
movk x14, #9, lsl 48
mul x10, x12, x6
dup.2d v8, x14
bic.16b v8, v8, v6
umulh x11, x12, x6
mov x12, #26576
adds x7, x10, x7
cinc x10, x11, hs
movk x12, #47696, lsl 16
adds x7, x7, x8
cinc x8, x10, hs
movk x12, #688, lsl 32
movk x12, #3, lsl 48
mul x10, x13, x6
dup.2d v9, x12
umulh x6, x13, x6
bic.16b v9, v9, v6
adds x8, x10, x8
cinc x6, x6, hs
mov x10, #46800
movk x10, #2568, lsl 16
adds x8, x8, x9
cinc x6, x6, hs
movk x10, #1335, lsl 32
add x9, x4, x6
movk x10, #4, lsl 48
dup.2d v10, x10
mov x4, #2
bic.16b v10, v10, v6
movk x4, #57344, lsl 16
mov x6, #49763
movk x4, #60199, lsl 32
movk x6, #40165, lsl 16
movk x6, #24776, lsl 32
movk x4, #34755, lsl 48
dup.2d v11, x6
mov x6, #57634
bic.16b v6, v11, v6
movk x6, #62322, lsl 16
sub.2d v0, v0, v7
ssra.2d v0, v4, #52
movk x6, #53392, lsl 32
sub.2d v4, v1, v8
movk x6, #20583, lsl 48
ssra.2d v4, v0, #52
mov x10, #45242
sub.2d v7, v2, v9
ssra.2d v7, v4, #52
movk x10, #770, lsl 16
sub.2d v5, v5, v10
movk x10, #35693, lsl 32
ssra.2d v5, v7, #52
movk x10, #28832, lsl 48
sub.2d v6, v3, v6
ssra.2d v6, v5, #52
mov x11, #16467
ushr.2d v1, v4, #12
movk x11, #49763, lsl 16
ushr.2d v2, v7, #24
movk x11, #40165, lsl 32
ushr.2d v3, v5, #36
sli.2d v0, v4, #52
movk x11, #24776, lsl 48
sli.2d v1, v7, #40
subs x4, x5, x4
sbcs x6, x7, x6
sbcs x10, x8, x10
sbcs x11, x9, x11
sli.2d v2, v5, #28
sli.2d v3, v6, #16
tst x9, #9223372036854775808
csel x4, x4, x5, mi
csel x5, x6, x7, mi
csel x6, x10, x8, mi
csel x7, x11, x9, mi
ret
|
XsystemH/aCore
| 3,982
|
os/src/link_app.S
|
.align 3
.section .data
.global _num_app
_num_app:
.quad 17
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_5_start
.quad app_6_start
.quad app_7_start
.quad app_8_start
.quad app_9_start
.quad app_10_start
.quad app_11_start
.quad app_12_start
.quad app_13_start
.quad app_14_start
.quad app_15_start
.quad app_16_start
.quad app_16_end
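# Table layout: _num_app holds the number of embedded apps (17), followed by the
# start address of each app image in order, terminated by app_16_end, so each
# image's extent is the span between consecutive entries. The loader presumably
# walks this table together with _app_names below to locate each embedded binary.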
.global _app_names
_app_names:
.string "exit"
.string "fantastic_text"
.string "forkexec"
.string "forktest"
.string "forktest2"
.string "forktest_simple"
.string "forktree"
.string "hello_world"
.string "initproc"
.string "matrix"
.string "sleep"
.string "sleep_simple"
.string "stack_overflow"
.string "user_shell"
.string "usertests"
.string "usertests-simple"
.string "yield"
.section .data
.global app_0_start
.global app_0_end
.align 3
app_0_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/exit"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
.align 3
app_1_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/fantastic_text"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
.align 3
app_2_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forkexec"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
.align 3
app_3_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
.align 3
app_4_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest2"
app_4_end:
.section .data
.global app_5_start
.global app_5_end
.align 3
app_5_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest_simple"
app_5_end:
.section .data
.global app_6_start
.global app_6_end
.align 3
app_6_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktree"
app_6_end:
.section .data
.global app_7_start
.global app_7_end
.align 3
app_7_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/hello_world"
app_7_end:
.section .data
.global app_8_start
.global app_8_end
.align 3
app_8_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/initproc"
app_8_end:
.section .data
.global app_9_start
.global app_9_end
.align 3
app_9_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/matrix"
app_9_end:
.section .data
.global app_10_start
.global app_10_end
.align 3
app_10_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep"
app_10_end:
.section .data
.global app_11_start
.global app_11_end
.align 3
app_11_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep_simple"
app_11_end:
.section .data
.global app_12_start
.global app_12_end
.align 3
app_12_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/stack_overflow"
app_12_end:
.section .data
.global app_13_start
.global app_13_end
.align 3
app_13_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/user_shell"
app_13_end:
.section .data
.global app_14_start
.global app_14_end
.align 3
app_14_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests"
app_14_end:
.section .data
.global app_15_start
.global app_15_end
.align 3
app_15_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests-simple"
app_15_end:
.section .data
.global app_16_start
.global app_16_end
.align 3
app_16_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/yield"
app_16_end:
|
XsystemH/aCore
| 1,639
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
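# The 8-byte slot offsets used below imply a trap-context layout roughly like the
# following Rust struct (a sketch inferred from the offsets; the actual definition
# in this repository may differ):
#
#     #[repr(C)]
#     pub struct TrapContext {
#         pub x: [usize; 32],      // slots 0..=31: general-purpose registers
#         pub sstatus: usize,      // slot 32
#         pub sepc: usize,         // slot 33
#         pub kernel_satp: usize,  // slot 34
#         pub kernel_sp: usize,    // slot 35
#         pub trap_handler: usize, // slot 36
#     }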
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space (constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
xuehaonan27/LuminOS
| 1,993
|
kernel/src/trap/trap.S
|
.attribute arch, "rv64gc" # Make LLVM happy
.set REGISTER_SIZE, 8 # on a 64-bit machine; should be 4 on a 32-bit machine
.set F_REGISTER_SIZE, 8 # on a RISC-V machine with the D extension
.altmacro
.macro SAVE_GP n
sd x\n, \n*REGISTER_SIZE(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*REGISTER_SIZE(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*REGISTER_SIZE(sp)
# skip sp(x2), we will save it later
sd x3, 3*REGISTER_SIZE(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*REGISTER_SIZE(sp)
sd t1, 33*REGISTER_SIZE(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*REGISTER_SIZE(sp)
# load kernel_satp into t0
ld t0, 34*REGISTER_SIZE(sp)
# load trap_handler into t1
ld t1, 36*REGISTER_SIZE(sp)
# move to kernel_sp
ld sp, 35*REGISTER_SIZE(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space (constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*REGISTER_SIZE(sp)
ld t1, 33*REGISTER_SIZE(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general-purpose registers except sp/tp
ld x1, 1*REGISTER_SIZE(sp)
ld x3, 3*REGISTER_SIZE(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*REGISTER_SIZE(sp)
sret
|
xuehaonan27/LuminOS
| 2,380
|
kernel/src/trap/trap_d_ext.S
|
.attribute arch, "rv64gc" # Make LLVM happy
.set REGISTER_SIZE, 8 # on a 64-bit machine; should be 4 on a 32-bit machine
.set F_REGISTER_SIZE, 8 # on a RISC-V machine with the D extension
.altmacro
.macro SAVE_GP n
sd x\n, \n*REGISTER_SIZE(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*REGISTER_SIZE(sp)
.endm
.macro SAVE_FP n
fsd f\n, (34*REGISTER_SIZE+\n*F_REGISTER_SIZE)(sp)
.endm
.macro LOAD_FP n
fld f\n, (34*REGISTER_SIZE+\n*F_REGISTER_SIZE)(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*REGISTER_SIZE # general purpose registers and 2 csr registers
addi sp, sp, -32*F_REGISTER_SIZE # room for the floating-point registers
# save general-purpose registers
sd x1, 1*REGISTER_SIZE(sp)
# skip sp(x2), we will save it later
sd x3, 3*REGISTER_SIZE(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# save f0~f31
.set n, 0
.rept 32
SAVE_FP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*REGISTER_SIZE(sp)
sd t1, 33*REGISTER_SIZE(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*REGISTER_SIZE(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
__restore:
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*REGISTER_SIZE(sp)
ld t1, 33*REGISTER_SIZE(sp)
ld t2, 2*REGISTER_SIZE(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers except sp/tp
ld x1, 1*REGISTER_SIZE(sp)
ld x3, 3*REGISTER_SIZE(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# restore floating-point registers
.set n, 0
.rept 32
LOAD_FP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 32*F_REGISTER_SIZE # floating-point register area
addi sp, sp, 34*REGISTER_SIZE
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
|
xuehaonan27/LuminOS
| 1,581
|
kernel/src/task/switch_d_ext.S
|
.attribute arch, "rv64gc" # Make LLVM happy
.set REGISTER_SIZE, 8 # On 64-bit machine
.set F_REGISTER_SIZE, 8 # On D Extension RISCV machine
.altmacro
.macro SAVE_SN n
sd s\n, (\n+2)*REGISTER_SIZE(a0)
.endm
.macro LOAD_SN n
ld s\n, (\n+2)*REGISTER_SIZE(a1)
.endm
.macro SAVE_FSN n
fsd fs\n, (14*REGISTER_SIZE+\n*F_REGISTER_SIZE)(a0)
.endm
.macro LOAD_FSN n
fld fs\n, (14*REGISTER_SIZE+\n*F_REGISTER_SIZE)(a1)
.endm
# Only ra, sp, sx registers need to be saved by assembly code.
# Other registers' saving and restoring should be handled by rustc.
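# The offsets used by SAVE_SN/LOAD_SN and SAVE_FSN/LOAD_FSN below imply a
# task-context layout roughly like this Rust struct (a sketch inferred from the
# offsets; the actual definition in this repository may differ):
#
#     #[repr(C)]
#     pub struct TaskContext {
#         pub ra: usize,      // byte offset 0
#         pub sp: usize,      // byte offset 8: kernel stack pointer
#         pub s: [usize; 12], // byte offsets 16..: s0~s11
#         pub fs: [f64; 12],  // byte offsets 112..: fs0~fs11
#     }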
.section .text
.globl __switch
__switch:
# Phase 1
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext,
# )
# Phase 2
# save kernel stack of current task
# a0 holds pointer to current TaskContext
# save kernel stack of current task
sd sp, REGISTER_SIZE(a0)
# save ra & s0 ~ s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n+1
.endr
# save fs0 ~ fs11 of current execution
.set n, 0
.rept 12
SAVE_FSN %n
.set n, n+1
.endr
# Phase 3
# restore fs0 ~ fs11 of next execution
.set n, 0
.rept 12
LOAD_FSN %n
.set n, n+1
.endr
# restore ra & s0~s11 of next execution
# a1 holds pointer to next TaskContext
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n+1
.endr
# restore kernel stack of next task
ld sp, REGISTER_SIZE(a1)
# Phase 4
ret
|
xuehaonan27/LuminOS
| 1,125
|
kernel/src/task/switch.S
|
.attribute arch, "rv64gc" # Make LLVM happy
.set REGISTER_SIZE, 8 # On 64-bit machine
.altmacro
.macro SAVE_SN n
sd s\n, (\n+2)*REGISTER_SIZE(a0)
.endm
.macro LOAD_SN n
ld s\n, (\n+2)*REGISTER_SIZE(a1)
.endm
# Only ra, sp, sx registers need to be saved by assembly code.
# Other registers' saving and restoring should be handled by rustc.
.section .text
.globl __switch
__switch:
# Phase 1
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext,
# )
# Phase 2
# save kernel stack of current task
# a0 holds pointer to current TaskContext
# save kernel stack of current task
sd sp, REGISTER_SIZE(a0)
# save ra & s0 ~ s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n+1
.endr
# Phase 3
# restore ra & s0~s11 of next execution
# a1 holds pointer to next TaskContext
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n+1
.endr
# restore kernel stack of next task
ld sp, REGISTER_SIZE(a1)
# Phase 4
ret
|
xukec/xk-rCore
| 2,538
|
os/src/trap/trap.S
|
.altmacro # needed so that the .rept directive works properly
# loop body for saving registers
.macro SACE_GP n
sd x\n, \n*8(sp) # \n is replaced by the n argument
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2 # .align N aligns to 2^N bytes; here the address of __alltraps is aligned to 4 bytes (required by the RISC-V privileged spec)
__alltraps:
# csrrw rd, csr, rs: reads the current value of the CSR into general register rd, then writes the value of general register rs into the CSR.
# here sp and sscratch are swapped; before the swap sp -> user stack, sscratch -> kernel stack
csrrw sp, sscratch, sp # after this instruction sp -> kernel stack, sscratch -> user stack
# addi rd, rs1, imm: adds a 12-bit signed immediate to a register and stores the result in another register.
# rd is the destination register, rs1 the source register, imm the 12-bit immediate; here sp is adjusted by -34*8
addi sp, sp, -34*8 # reserve a 34*8-byte frame on the kernel stack for the trap context, covering [sp, sp+8*34)
# x0 is hardwired to 0 and never changes; tp (x4) is normally unused unless we deliberately use it for some special purpose
# store-doubleword instruction: stores the value of x1 at sp plus 8 bytes
sd x1, 1*8(sp) # save general-purpose registers; x1 goes to [sp+8, sp+16), in general xn goes to [sp+8n, sp+8(n+1))
# skip sp(x2), we will save it later; sp(x2) is not saved yet because it is needed to locate where each register should be saved
sd x3, 3*8(sp)
# save x5-x31
.set n, 5 # set the n variable to 5
.rept 27 # loop 27 times for x5~x31
SACE_GP %n # pass n
.set n, n+1 # increment n
.endr
# read CSRs sstatus and sepc into registers t0 and t1, then save them at their slots on the kernel stack
# t0/t1/t2 can be used freely without worrying about them being clobbered, because they were already saved on the kernel stack above
# csrr rd, csr: reads the value of the CSR into register rd
csrr t0, sstatus # previous privilege level
csrr t1, sepc # address of the last instruction executed before the trap
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read the user stack address from sscratch into t2 and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set the input argument of trap_handler(cx: &mut TrapContext)
# make register a0 point to the kernel stack pointer, i.e. the address of the trap context we just saved
# reason: trap_handler is about to be called to handle the trap, and by the calling convention its first argument cx is taken from a0.
# trap_handler needs the trap context because it has to know the values of certain registers,
# e.g. the syscall ID and arguments passed by the application on a system call. We cannot use the current values of these registers directly,
# because they may already have been modified, so the values saved on the kernel stack are used instead.
mv a0, sp # copy the value of sp into a0
call trap_handler
__restore:
# an app starts running via __restore
# it is also used to return to user mode after a trap has been handled
# mv sp, a0 # (guess: a0 may have carried a return value to pass back)
# (__switch has already made sp point to the required trap context address)
# now sp -> kernel stack (after allocation), sscratch -> user stack
# restore the CSRs before the general-purpose registers, so that the three temporary registers we use can be restored correctly
# restore CSRs
# load-doubleword instruction: loads the value at 32*8(sp) into register t0
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
# write to a control and status register: csrw csr, rs writes the value of rs into the CSR
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release the trap context on the kernel stack
addi sp, sp, 34*8
# swap sp and sscratch: sp -> user stack, sscratch -> kernel stack
csrrw sp, sscratch, sp
# return to the previous mode, i.e. go back to U privilege level and continue running the app
sret
|
xukec/xk-rCore
| 859
|
os/src/task/switch.S
|
.altmacro # needed so that the .rept directive works properly
# loop body for saving registers
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0) # \n is replaced by the n argument
.endm
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
# Phase 1
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# Phase 2
# save kernel stack of current task
# store-doubleword instruction: stores the value of sp at a0 plus 8 bytes
# saved registers go to [a0+8, a0+16); in general the slot is [a0+8n, a0+8(n+1))
sd sp, 8(a0)
# save ra and s0~s11 of the current execution
# ra goes to [a0, a0+8)
sd ra, 0(a0)
# save s0-s11
.set n, 0 # set the n variable to 0
.rept 12 # loop 12 times for s0~s11
SAVE_SN %n # pass n
.set n, n+1 # increment n
.endr
# Phase 3
# restore ra and s0~s11 of the next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore the kernel stack of the next task
ld sp, 8(a1)
# Phase 4
ret
|
xunxue01/rcore-ch
| 1,488
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
__restore:
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
|
XxChang/arceos
| 2,001
|
modules/axhal/linker.lds.S
|
OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
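/* %ARCH%, %KERNEL_BASE% and %SMP% are not valid linker-script syntax as written;
   they look like template placeholders substituted by the build script before the
   script is handed to the linker (an inference from the .lds.S naming and the
   placeholder style, not something stated in this file). */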
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
_srodata = .;
.rodata : ALIGN(4K) {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
}
.init_array : ALIGN(0x10) {
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
}
. = ALIGN(4K);
_erodata = .;
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %SMP%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : AT(.) ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
XxChang/arceos
| 4,325
|
modules/axhal/src/platform/x86_pc/multiboot.S
|
# Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
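# The brace-delimited tokens below ({mb_hdr_magic}, {offset}, {entry}, ...) are not
# GNU as syntax on their own; they appear to be format arguments filled in when this
# file is pulled in from Rust, e.g. via global_asm!(include_str!("multiboot.S"),
# mb_hdr_magic = const ..., ...). That is an inference from the syntax, not stated here.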
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
XxChang/arceos
| 1,965
|
modules/axhal/src/platform/x86_pc/ap_start.S
|
# Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
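# stack_ptr and entry_ptr occupy the last 16 bytes of the AP start page; the boot
# CPU presumably writes the AP's stack top and entry address there before sending
# the STARTUP IPI, and ap_start32 below loads esp from and jumps through them.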
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0)
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr]
mov eax, [entry_ptr]
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
XxChang/arceos
| 2,544
|
tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s
|
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
Ya0rk/YooOs
| 3,421
|
os/src/trap/trap.s
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro SAVE_GP_RANGE start, end
.set n, start
.rept end - start + 1
SAVE_GP %n
.set n, n + 1
.endr
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.macro LOAD_GP_RANGE start, end
.set n, start
.rept end - start + 1
LOAD_GP %n
.set n, n + 1
.endr
.endm
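# The slot offsets used below imply a trap-context layout roughly like this Rust
# struct (a sketch inferred from the offsets; the actual definition in this
# repository may differ):
#
#     #[repr(C)]
#     pub struct TrapContext {
#         pub x: [usize; 32],        // slots 0..=31: user general-purpose registers
#         pub sstatus: usize,        // slot 32
#         pub sepc: usize,           // slot 33
#         pub kernel_sp: usize,      // slot 34
#         pub kernel_ra: usize,      // slot 35
#         pub kernel_s: [usize; 12], // slots 36..=47: callee-saved s0~s11
#         pub kernel_fp: usize,      // slot 48
#         pub kernel_tp: usize,      // slot 49
#     }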
.section .text
.globl __trap_from_user
.globl __return_to_user
.globl __trap_from_kernel
.align 2
# user -> kernel
__trap_from_user:
# swap sp <-> sscratch; the user stack top is kept in sscratch
csrrw sp, sscratch, sp
# now sp points to the TrapContext while sscratch points to the user stack top
# save register x1 into the TrapContext
# sd x1, 1*8(sp)
SAVE_GP 1
# skip saving x2 here; save x3 - x31 instead
# x2 is saved later
# this is a loop plus a macro
SAVE_GP_RANGE 3, 31
# t0-t2 were saved by the line above, so they can be used now
# save sstatus and sepc
csrr t0, sstatus
csrr t1, sepc
# store sstatus and sepc into their slots in the TrapContext
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# sscratch holds the user sp; store it into its slot in the TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# next we enter the kernel, so load the corresponding registers
# move to kernel_sp
# load the kernel's return address
ld ra, 35*8(sp)
# load callee-saved regs
# s0 - s11
ld s0, 36*8(sp)
ld s1, 37*8(sp)
ld s2, 38*8(sp)
ld s3, 39*8(sp)
ld s4, 40*8(sp)
ld s5, 41*8(sp)
ld s6, 42*8(sp)
ld s7, 43*8(sp)
ld s8, 44*8(sp)
ld s9, 45*8(sp)
ld s10, 46*8(sp)
ld s11, 47*8(sp)
# load kernel fp tp
ld fp, 48*8(sp)
ld tp, 49*8(sp)
# finally, load the kernel stack pointer
ld sp, 34*8(sp)
# return to kernel ra
ret
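# Rough sketch of the TrapContext layout implied by the offsets used above
# (one 8-byte slot per field; slot names are illustrative, not from the source):
#   slots  0-31 : x0 - x31 (the user sp ends up in slot 2 via sscratch)
#   slot     32 : sstatus
#   slot     33 : sepc
#   slot     34 : kernel sp
#   slot     35 : kernel ra
#   slots 36-47 : kernel s0 - s11
#   slot     48 : kernel fp
#   slot     49 : kernel tp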
# kernel -> user
__return_to_user:
# at this point a0 is the TrapContext pointer
# switch to user space
# update sscratch so it holds the TrapContext address again for the next trap
csrw sscratch, a0
# save the kernel registers
# this mirrors the series of kernel-register loads in __trap_from_user above
sd sp, 34*8(a0) # save the kernel stack pointer
sd ra, 35*8(a0) # save ra, the return address
sd s0, 36*8(a0) # save s0 - s11
sd s1, 37*8(a0)
sd s2, 38*8(a0)
sd s3, 39*8(a0)
sd s4, 40*8(a0)
sd s5, 41*8(a0)
sd s6, 42*8(a0)
sd s7, 43*8(a0)
sd s8, 44*8(a0)
sd s9, 45*8(a0)
sd s10, 46*8(a0)
sd s11, 47*8(a0)
sd fp, 48*8(a0) # save fp
sd tp, 49*8(a0) # save tp
# point the stack pointer at the TrapContext
mv sp, a0
# now sp points to TrapContext in kernel space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore the user registers; x0 and sp are handled separately below
ld x1, 1*8(sp)
LOAD_GP_RANGE 3, 31
# restore the user stack pointer
ld sp, 2*8(sp)
# return to user mode
sret
# handle the kernel's own traps
# kernel -> kernel
__trap_from_kernel:
# only need to save caller-saved regs
# note that we don't save sepc & stvec here
addi sp, sp, -17*8
sd ra, 1*8(sp)
sd t0, 2*8(sp)
sd t1, 3*8(sp)
sd t2, 4*8(sp)
sd t3, 5*8(sp)
sd t4, 6*8(sp)
sd t5, 7*8(sp)
sd t6, 8*8(sp)
sd a0, 9*8(sp)
sd a1, 10*8(sp)
sd a2, 11*8(sp)
sd a3, 12*8(sp)
sd a4, 13*8(sp)
sd a5, 14*8(sp)
sd a6, 15*8(sp)
sd a7, 16*8(sp)
call kernel_trap_handler
ld ra, 1*8(sp)
ld t0, 2*8(sp)
ld t1, 3*8(sp)
ld t2, 4*8(sp)
ld t3, 5*8(sp)
ld t4, 6*8(sp)
ld t5, 7*8(sp)
ld t6, 8*8(sp)
ld a0, 9*8(sp)
ld a1, 10*8(sp)
ld a2, 11*8(sp)
ld a3, 12*8(sp)
ld a4, 13*8(sp)
ld a5, 14*8(sp)
ld a6, 15*8(sp)
ld a7, 16*8(sp)
addi sp, sp, 17*8
sret
|
yampiii/asde
| 7,962
|
tests/syntax-tests/highlighted/ARM Assembly/test.S
|
.data
.balign 4
red: .word 0
green: .word 0
blue: .word 0
.text
.global grayscale
.func grayscale
grayscale:
assign:
/* some comment */
ldr ip, addr_red
str r3, [ip]
ldr ip, addr_green
ldmfd r13!, {r3}
str r3, [ip]
ldr ip, addr_blue
ldmfd r13!, {r3}
str r3, [ip]
stmfd r13!, {r4-r8}
ldr ip, addr_red
ldr r3, [ip]
ldr ip, addr_green
ldr r4, [ip]
ldr ip, addr_blue
ldr r5, [ip] /* another comment */
grayscale_loop:
ldrb r6, [r1]
mul r6, r3, r6
add r1, r1, #1
ldrb r7, [r1]
mul r7, r4, r7
add r1, r1, #1
ldrb r8, [r1]
mul r8, r5, r8
add r1, r1, #1
add r6, r6, r7
add r6, r6, r8
asr r6, r6, #8
str r6, [r2]
add r2, r2, #1
sub r0, r0, #1
cmp r0, #0
bne grayscale_loop
ldmfd r13!, {r4-r8}
stmfd r13!, {r0-r1}
bx lr
addr_red: .word red
addr_green: .word green
addr_blue: .word blue
|
yashodipmore/RISC-V-CPU-Simulator-Assembler
| 1,256
|
examples/fibonacci.s
|
# Fibonacci Sequence Calculator in RISC-V Assembly
# Calculates the first 10 numbers in the Fibonacci sequence
.text
main:
# Initialize registers
addi x1, x0, 0 # x1 = 0 (first Fibonacci number)
addi x2, x0, 1 # x2 = 1 (second Fibonacci number)
addi x3, x0, 10 # x3 = 10 (counter)
addi x4, x0, 0 # x4 = 0 (current index)
# The first two Fibonacci numbers are already in x1 and x2
fibonacci_loop:
# Check if we've calculated enough numbers
beq x4, x3, end_program
# Calculate next Fibonacci number: x5 = x1 + x2
add x5, x1, x2
# Update for next iteration
mv x1, x2 # x1 = previous x2
mv x2, x5 # x2 = new Fibonacci number
# Increment counter
addi x4, x4, 1
# Store result in memory (optional - for demonstration)
slli x6, x4, 2 # x6 = x4 * 4 (word offset)
addi x7, x0, 0x1000 # Base address for results
add x8, x7, x6 # Calculate final address
sw x2, 0(x8) # Store Fibonacci number
# Continue loop
j fibonacci_loop
end_program:
# Program complete - infinite loop to halt
j end_program
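# Worked trace (sketch): starting from x1=0, x2=1 the loop stores the values
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 at addresses 0x1004, 0x1008, ..., 0x1028
# (the index starts at 1, so offset 0 of the result area is never written).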
.data
# Reserve space for results
results: .space 40 # Space for 10 words
|
yashodipmore/RISC-V-CPU-Simulator-Assembler
| 1,863
|
examples/sorting.s
|
# Bubble Sort Implementation in RISC-V Assembly
# Sorts an array of integers using bubble sort algorithm
.text
main:
# Initialize array parameters
addi x1, x0, 8 # Array length
addi x2, x0, 0x1000 # Array base address
# Initialize test data in memory
addi x3, x0, 64 # First element
sw x3, 0(x2)
addi x3, x0, 34
sw x3, 4(x2)
addi x3, x0, 25
sw x3, 8(x2)
addi x3, x0, 12
sw x3, 12(x2)
addi x3, x0, 22
sw x3, 16(x2)
addi x3, x0, 11
sw x3, 20(x2)
addi x3, x0, 90
sw x3, 24(x2)
addi x3, x0, 5
sw x3, 28(x2)
bubble_sort:
addi x4, x0, 0 # i = 0 (outer loop counter)
outer_loop:
beq x4, x1, sort_complete # if i == length, exit
addi x5, x0, 0 # j = 0 (inner loop counter)
sub x6, x1, x4 # length - i
addi x6, x6, -1 # length - i - 1
inner_loop:
beq x5, x6, outer_next # if j == length-i-1, next outer iteration
# Calculate addresses for array[j] and array[j+1]
slli x7, x5, 2 # j * 4
add x8, x2, x7 # address of array[j]
lw x9, 0(x8) # Load array[j]
lw x10, 4(x8) # Load array[j+1]
# Compare and swap if necessary
blt x9, x10, no_swap # if array[j] < array[j+1], no swap needed
# Swap elements
sw x10, 0(x8) # array[j] = array[j+1]
sw x9, 4(x8) # array[j+1] = array[j]
no_swap:
addi x5, x5, 1 # j++
j inner_loop
outer_next:
addi x4, x4, 1 # i++
j outer_loop
sort_complete:
# Verification: Load sorted values (optional)
lw x11, 0(x2) # Load first element
lw x12, 4(x2) # Load second element
lw x13, 8(x2) # Load third element
# Infinite loop to halt execution
j sort_complete
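# Expected outcome (sketch): after the sort, the 8 words at 0x1000 read
# 5, 11, 12, 22, 25, 34, 64, 90 in ascending order, so the verification
# loads above leave x11=5, x12=11 and x13=12.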
.data
array: .space 32 # Space for 8 integers
|
yashodipmore/RISC-V-CPU-Simulator-Assembler
| 1,143
|
examples/fibonacci_simple.s
|
# Simple Fibonacci Sequence Calculator in RISC-V Assembly
# Calculates the first 5 numbers in the Fibonacci sequence
main:
# Initialize registers
addi x1, x0, 0 # x1 = 0 (first Fibonacci number)
addi x2, x0, 1 # x2 = 1 (second Fibonacci number)
addi x3, x0, 5 # x3 = 5 (counter)
addi x4, x0, 0 # x4 = 0 (current index)
fibonacci_loop:
# Check if we've calculated enough numbers
beq x4, x3, end_program
# Calculate next Fibonacci number: x5 = x1 + x2
add x5, x1, x2
# Update for next iteration
addi x1, x2, 0 # x1 = x2 (move x2 to x1)
addi x2, x5, 0 # x2 = x5 (move x5 to x2)
# Increment counter
addi x4, x4, 1
# Store result in memory (optional - for demonstration)
slli x6, x4, 2 # x6 = x4 * 4 (word offset)
addi x7, x0, 1000 # Base address for results (smaller address)
add x8, x7, x6 # Calculate final address
sw x2, 0(x8) # Store Fibonacci number
# Continue loop
jal x0, fibonacci_loop
end_program:
# Program complete - infinite loop to halt
jal x0, end_program
|
yavuztackin/Zybo_Z20_Vitis_LEDBlink-HelloWorldUART-Examples
| 5,201
|
helloworldminizedproject/zynq_fsbl/fsbl_handoff.S
|
/******************************************************************************
*
* Copyright (c) 2012 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file handoff.S
*
* Contains the code that does the handoff to the loaded application. This
* code lives high in the ROM.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.00a ecm 03/01/10 Initial release
* 7.00a kc 10/23/13 Added support for armcc compiler
* </pre>
*
* @note
* Assumes that the starting address of the FSBL is provided by the calling routine
* in R0.
*
******************************************************************************/
#ifdef __GNUC__
.globl FsblHandoffJtagExit
.globl FsblHandoffExit
.section .handoff,"axS"
/***************************** Include Files *********************************/
/************************** Constant Definitions *****************************/
/**************************** Type Definitions *******************************/
/***************** Macros (Inline Functions) Definitions *********************/
/************************** Function Prototypes ******************************/
/************************** Variable Definitions *****************************/
FsblHandoffJtagExit:
mcr 15,0,r0,cr7,cr5,0 /* Invalidate Instruction cache */
mcr 15,0,r0,cr7,cr5,6 /* Invalidate branch predictor array */
dsb
isb /* make sure it completes */
ldr r4, =0
mcr 15,0,r4,cr1,cr0,0 /* disable the ICache and MMU */
isb /* make sure it completes */
Loop:
wfe
b Loop
FsblHandoffExit:
mov lr, r0 /* move the destination address into link register */
mcr 15,0,r0,cr7,cr5,0 /* Invalidate Instruction cache */
mcr 15,0,r0,cr7,cr5,6 /* Invalidate branch predictor array */
dsb
isb /* make sure it completes */
ldr r4, =0
mcr 15,0,r4,cr1,cr0,0 /* disable the ICache and MMU */
isb /* make sure it completes */
bx lr /* force the switch, destination should have been in r0 */
.Ldone: b .Ldone /* Paranoia: we should never get here */
.end
#elif defined (__IASMARM__)
PUBLIC FsblHandoffJtagExit
PUBLIC FsblHandoffExit
SECTION .handoff:CODE:NOROOT(2)
/***************************** Include Files *********************************/
/************************** Constant Definitions *****************************/
/**************************** Type Definitions *******************************/
/***************** Macros (Inline Functions) Definitions *********************/
/************************** Function Prototypes ******************************/
/************************** Variable Definitions *****************************/
FsblHandoffJtagExit
mcr p15,0,r0,c7,c5,0 ;/* Invalidate Instruction cache */
mcr p15,0,r0,c7,c5,6 ;/* Invalidate branch predictor array */
dsb
isb ;/* make sure it completes */
ldr r4, =0
mcr p15,0,r4,c1,c0,0 ;/* disable the ICache and MMU */
isb ;/* make sure it completes */
Loop
wfe
b Loop
FsblHandoffExit
mov lr, r0 ;/* move the destination address into link register */
mcr p15,0,r0,c7,c5,0 ;/* Invalidate Instruction cache */
mcr p15,0,r0,c7,c5,6 ;/* Invalidate branch predictor array */
dsb
isb ;/* make sure it completes */
ldr r4, =0
mcr p15,0,r4,c1,c0,0 ;/* disable the ICache and MMU */
isb ;/* make sure it completes */
bx lr ;/* force the switch, destination should have been in r0 */
.Ldone
b .Ldone ;/* Paranoia: we should never get here */
END
#else
EXPORT FsblHandoffJtagExit
EXPORT FsblHandoffExit
AREA |.handoff|,CODE
;/***************************** Include Files *********************************/
;/************************** Constant Definitions *****************************/
;/**************************** Type Definitions *******************************/
;/***************** Macros (Inline Functions) Definitions *********************/
;/************************** Function Prototypes ******************************/
;/************************** Variable Definitions *****************************/
FsblHandoffJtagExit
mcr p15,0,r0,c7,c5,0 ;/* Invalidate Instruction cache */
mcr p15,0,r0,c7,c5,6 ;/* Invalidate branch predictor array */
dsb
isb ;/* make sure it completes */
ldr r4, =0
mcr p15,0,r4,c1,c0,0 ;/* disable the ICache and MMU */
isb ;/* make sure it completes */
Loop
wfe
b Loop
FsblHandoffExit
mov lr, r0 ;/* move the destination address into link register */
mcr p15,0,r0,c7,c5,0 ;/* Invalidate Instruction cache */
mcr p15,0,r0,c7,c5,6 ;/* Invalidate branch predictor array */
dsb
isb ;/* make sure it completes */
ldr r4, =0
mcr p15,0,r4,c1,c0,0 ;/* disable the ICache and MMU */
isb ;/* make sure it completes */
bx lr ;/* force the switch, destination should have been in r0 */
Ldone b Ldone ;/* Paranoia: we should never get here */
END
#endif
|
yavuztackin/Zybo_Z20_Vitis_LEDBlink-HelloWorldUART-Examples
| 8,023
|
helloworldminizedproject/ps7_cortexa9_0/standalone_domain/bsp/ps7_cortexa9_0/libsrc/standalone_v8_0/src/translation_table.S
|
/******************************************************************************
* Copyright (c) 2009 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.S
*
* @addtogroup a9_boot_code
* @{
* <h2> translation_table.S </h2>
* The translation_table.S contains a static page table required by MMU for
* cortex-A9. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq architecture. It
* utilizes short descriptor translation table format with each section defining
* 1 MB of memory.
*
* The overview of translation table memory attributes is described below.
*
*| | Memory Range | Definition in Translation Table |
*|-----------------------|-------------------------|-----------------------------------|
*| DDR | 0x00000000 - 0x3FFFFFFF | Normal write-back Cacheable |
*| PL | 0x40000000 - 0xBFFFFFFF | Strongly Ordered |
*| Reserved | 0xC0000000 - 0xDFFFFFFF | Unassigned |
*| Memory mapped devices | 0xE0000000 - 0xE02FFFFF | Device Memory |
*| Reserved | 0xE0300000 - 0xE0FFFFFF | Unassigned |
*| NAND, NOR | 0xE1000000 - 0xE3FFFFFF | Device memory |
*| SRAM | 0xE4000000 - 0xE5FFFFFF | Normal write-back Cacheable |
*| Reserved | 0xE6000000 - 0xF7FFFFFF | Unassigned |
*| AMBA APB Peripherals | 0xF8000000 - 0xF8FFFFFF | Device Memory |
*| Reserved | 0xF9000000 - 0xFBFFFFFF | Unassigned |
*| Linear QSPI - XIP | 0xFC000000 - 0xFDFFFFFF | Normal write-through cacheable |
*| Reserved | 0xFE000000 - 0xFFEFFFFF | Unassigned |
*| OCM | 0xFFF00000 - 0xFFFFFFFF | Normal inner write-back cacheable |
*
* For the region 0x00000000 - 0x3FFFFFFF, on a system with less than 1 GB of DDR,
* the range after the end of DDR and before PL is marked as undefined/reserved in
* the translation table. Within 0xF8000000 - 0xF8FFFFFF, the ranges 0xF8000C00 -
* 0xF8000FFF, 0xF8010000 - 0xF88FFFFF and 0xF8F03000 - 0xF8FFFFFF are reserved,
* but due to the granule size of 1 MB it is not possible to define separate
* regions for them. For the region 0xFFF00000 - 0xFFFFFFFF, 0xFFF00000 to
* 0xFFFB0000 is reserved, but due to the 1 MB granule size it is not possible
* to define a separate region for it.
*
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 1.00a ecm 10/20/09 Initial version
* 3.04a sdm 01/13/12 Updated MMU table to mark DDR memory as Shareable
* 3.07a sgd 07/05/2012 Configuring device address spaces as shareable device
* instead of strongly-ordered.
* 3.07a asa 07/17/2012 Changed the property of the ".mmu_tbl" section.
* 4.2 pkp 09/02/2014 added entries for 0xfe000000 to 0xffefffff as reserved
* and 0xe0000000 - 0xe1ffffff is broken down into
* 0xe0000000 - 0xe02fffff (memory mapped devices)
* 0xe0300000 - 0xe0ffffff (reserved) and
* 0xe1000000 - 0xe1ffffff (NAND)
* 5.2 pkp 06/08/2015 put a check for XPAR_PS7_DDR_0_S_AXI_BASEADDR to confirm
* if DDR is present or not and accordingly generate the
* translation table
* 6.1 pkp 07/11/2016 Corrected comments for memory attributes
* 6.8 mus 07/12/2018 Mark DDR memory as inner cacheable, if BSP is built
* with the USE_AMP flag.
* </pre>
*
*
******************************************************************************/
#include "xparameters.h"
.globl MMUTable
.section .mmu_tbl,"a"
MMUTable:
/* Each table entry occupies one 32-bit word and there are
* 4096 entries, so the entire table takes up 16KB.
* Each entry covers a 1MB section.
*/
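/* Rough decode of the short-descriptor section entries used below
 * (standard ARMv7-A format; shown against the 0x15de6 pattern as an example):
 *   bits [1:0]   = 0b10      section entry
 *   bit  [2]     = B, bit [3] = C   (memory type, together with TEX)
 *   bits [8:5]   = Domain
 *   bits [11:10] = AP[1:0], bit [15] = AP[2]
 *   bits [14:12] = TEX
 *   bit  [16]    = S (shareable)
 *   bits [31:20] = 1MB section base address (supplied via SECT)
 */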
.set SECT, 0
#ifdef XPAR_PS7_DDR_0_S_AXI_BASEADDR
.set DDR_START, XPAR_PS7_DDR_0_S_AXI_BASEADDR
.set DDR_END, XPAR_PS7_DDR_0_S_AXI_HIGHADDR
.set DDR_SIZE, (DDR_END - DDR_START)+1
.set DDR_REG, DDR_SIZE/0x100000
#else
.set DDR_REG, 0
#endif
.set UNDEF_REG, 0x3FF - DDR_REG
#ifndef USE_AMP
/*0x00000000 - 0x00100000 (inner and outer cacheable )*/
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
#else
/*0x00000000 - 0x00100000 (inner cacheable )*/
.word SECT + 0x14de6 /* S=b1 TEX=b100 AP=b11, Domain=b1111, C=b0, B=b1 */
#endif
.set SECT, SECT+0x100000
.rept DDR_REG /* (DDR Cacheable) */
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept UNDEF_REG /* (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0400 /* 0x40000000 - 0x7fffffff (FPGA slave0) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0400 /* 0x80000000 - 0xbfffffff (FPGA slave1) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0xc0000000 - 0xdfffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x003 /* 0xe0000000 - 0xe02fffff (Memory mapped devices)
* UART/USB/IIC/SPI/CAN/GEM/GPIO/QSPI/SD/NAND */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0D /* 0xe0300000 - 0xe0ffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0010 /* 0xe1000000 - 0xe1ffffff (NAND) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0020 /* 0xe2000000 - 0xe3ffffff (NOR) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0020 /* 0xe4000000 - 0xe5ffffff (SRAM) */
.word SECT + 0xc0e /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0120 /* 0xe6000000 - 0xf7ffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
/* 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
0xf8f03000 to 0xf8ffffff are reserved but due to the granule size of
1MB, it is not possible to define separate regions for them */
.rept 0x0010 /* 0xf8000000 - 0xf8ffffff (AMBA APB Peripherals) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0030 /* 0xf9000000 - 0xfbffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0020 /* 0xfc000000 - 0xfdffffff (Linear QSPI - XIP) */
.word SECT + 0xc0a /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x001F /* 0xfe000000 - 0xffefffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
/* 0xfff00000 to 0xfffb0000 is reserved but due to the granule size of
1MB, it is not possible to define a separate region for it
0xfff00000 - 0xffffffff
256K OCM when mapped to high address space
inner-cacheable */
.word SECT + 0x4c0e /* S=b0 TEX=b100 AP=b11, Domain=b0, C=b1, B=b1 */
.set SECT, SECT+0x100000
.end
/**
* @} End of "addtogroup a9_boot_code".
*/
|
yavuztackin/Zybo_Z20_Vitis_LEDBlink-HelloWorldUART-Examples
| 3,153
|
helloworldminizedproject/ps7_cortexa9_0/standalone_domain/bsp/ps7_cortexa9_0/libsrc/standalone_v8_0/src/xil-crt0.S
|
/******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 1.00a ecm 10/20/09 Initial version
* 3.05a sdm 02/02/12 Added code for profiling
* 3.06a sgd 05/16/12 Added global constructors and cleanup code
* Uart initialization based on compiler flag
* 3.07a sgd 07/05/12 Updated with reset and start Global Timer
* 3.07a sgd 10/19/12 SMC NOR and SRAM initialization with build option
* 4.2 pkp 08/04/14 Removed PEEP board related code which contained
* initialization of uart smc nor and sram
* 5.3 pkp 10/07/15 Added support for OpenAMP by not initializing global
* timer when USE_AMP flag is defined
* 6.6 srm 10/18/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
* 7.7 adk 11/30/21 Added support for xiltimer library.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "bspconfig.h"
#include "xparameters.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.text
.Lsbss_start:
.long __sbss_start
.Lsbss_end:
.long __sbss_end
.Lbss_start:
.long __bss_start
.Lbss_end:
.long __bss_end
.Lstack:
.long __stack
.globl _start
_start:
bl __cpu_init /* Initialize the CPU first (BSP provides this) */
mov r0, #0
/* clear sbss */
ldr r1,.Lsbss_start /* calculate beginning of the SBSS */
ldr r2,.Lsbss_end /* calculate end of the SBSS */
.Lloop_sbss:
cmp r1,r2
bge .Lenclsbss /* If no SBSS, no clearing required */
str r0, [r1], #4
b .Lloop_sbss
.Lenclsbss:
/* clear bss */
ldr r1,.Lbss_start /* calculate beginning of the BSS */
ldr r2,.Lbss_end /* calculate end of the BSS */
.Lloop_bss:
cmp r1,r2
bge .Lenclbss /* If no BSS, no clearing required */
str r0, [r1], #4
b .Lloop_bss
.Lenclbss:
/* set stack pointer */
ldr r13,.Lstack /* stack address */
/* Reset and start Global Timer */
mov r0, #0x0
mov r1, #0x0
/* Reset and start Triple Timer Counter */
#if defined SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
#ifndef XPAR_XILTIMER_ENABLED
#if USE_AMP != 1
bl XTime_SetTime
#endif
#endif
#ifdef PROFILING /* defined in Makefile */
/* Setup profiling stuff */
bl _profile_init
#endif /* PROFILING */
/* run global constructors */
bl __libc_init_array
/* make sure argc and argv are valid */
mov r0, #0
mov r1, #0
/* Let her rip */
bl main
/* Cleanup global constructors */
bl __libc_fini_array
#ifdef PROFILING
/* Cleanup profiling stuff */
bl _profile_clean
#endif /* PROFILING */
/* All done */
bl exit
.Lexit: /* should never get here */
b .Lexit
.Lstart:
.size _start,.Lstart-_start
|
yavuztackin/Zybo_Z20_Vitis_LEDBlink-HelloWorldUART-Examples
| 4,892
|
helloworldminizedproject/ps7_cortexa9_0/standalone_domain/bsp/ps7_cortexa9_0/libsrc/standalone_v8_0/src/asm_vectors.S
|
/******************************************************************************
* Copyright (c) 2009 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.S
*
* This file contains the initial vector table for the Cortex A9 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.05a sdm 02/02/12 Save lr when profiling is enabled
* 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
* 'xil_errata.h' for errata description
* 4.00a pkp 22/01/14 Modified return addresses for interrupt
* handlers (DataAbortHandler and SVCHandler)
* to fix CR#767251
* 5.1 pkp 05/13/15 Saved the addresses of instruction causing data
* abort and prefetch abort into DataAbortAddr and
* PrefetchAbortAddr for further use to fix CR#854523
* 5.4 pkp 12/03/15 Added handler for undefined exception
* 6.8 mus 04/27/18 Removed __ARM_NEON__ flag definition. Now,
* saving/restoring of HW floating point registers
* would be done through newly introduced flag
* FPU_HARD_FLOAT_ABI_ENABLED. This new flag will be
* configured based on the -mfpu-abi option in extra
* compiler flags.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xil_errata.h"
#include "bspconfig.h"
.org 0
.text
.globl _vector_table
.section .vectors
_vector_table:
B _boot
B Undefined
B SVCHandler
B PrefetchAbortHandler
B DataAbortHandler
NOP /* Placeholder for address exception vector*/
B IRQHandler
B FIQHandler
IRQHandler: /* IRQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code*/
#if FPU_HARD_FLOAT_ABI_ENABLED
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
#endif
#ifdef PROFILING
ldr r2, =prof_pc
subs r3, lr, #0
str r3, [r2]
#endif
bl IRQInterrupt /* IRQ vector */
#if FPU_HARD_FLOAT_ABI_ENABLED
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
#endif
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
#if FPU_HARD_FLOAT_ABI_ENABLED
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
#endif
FIQLoop:
bl FIQInterrupt /* FIQ vector */
#if FPU_HARD_FLOAT_ABI_ENABLED
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
#endif
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
Undefined: /* Undefined handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =UndefinedExceptionAddr
sub r1, lr, #4
str r1, [r0] /* Store address of instruction causing undefined exception */
bl UndefinedException /* UndefinedException: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr
SVCHandler: /* SWI handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
tst r0, #0x20 /* check the T bit */
ldrneh r0, [lr,#-2] /* Thumb mode */
bicne r0, r0, #0xff00 /* Thumb mode */
ldreq r0, [lr,#-4] /* ARM mode */
biceq r0, r0, #0xff000000 /* ARM mode */
bl SWInterrupt /* SWInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr /*return to the next instruction after the SWI instruction */
DataAbortHandler: /* Data Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
dsb
#endif
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =DataAbortAddr
sub r1, lr, #8
str r1, [r0] /* Stores instruction causing data abort */
bl DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #8 /* points to the instruction that caused the Data Abort exception */
PrefetchAbortHandler: /* Prefetch Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
dsb
#endif
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =PrefetchAbortAddr
sub r1, lr, #4
str r1, [r0] /* Stores instruction causing prefetch abort */
bl PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* points to the instruction that caused the Prefetch Abort exception */
.end
|